/[linux-patches]/genpatches-2.6/tags/2.6.32-15/1016_linux-2.6.32.17.patch
Gentoo

Contents of /genpatches-2.6/tags/2.6.32-15/1016_linux-2.6.32.17.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1735 - (show annotations) (download)
Wed Aug 4 11:25:09 2010 UTC (4 years, 4 months ago) by mpagano
File size: 240272 byte(s)
2.6.32-15 release
1 diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
2 index da1f949..8bccbfa 100644
3 --- a/arch/arm/kernel/kprobes-decode.c
4 +++ b/arch/arm/kernel/kprobes-decode.c
5 @@ -583,13 +583,14 @@ static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs)
6 {
7 insn_llret_3arg_fn_t *i_fn = (insn_llret_3arg_fn_t *)&p->ainsn.insn[0];
8 kprobe_opcode_t insn = p->opcode;
9 + long ppc = (long)p->addr + 8;
10 union reg_pair fnr;
11 int rd = (insn >> 12) & 0xf;
12 int rn = (insn >> 16) & 0xf;
13 int rm = insn & 0xf;
14 long rdv;
15 - long rnv = regs->uregs[rn];
16 - long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */
17 + long rnv = (rn == 15) ? ppc : regs->uregs[rn];
18 + long rmv = (rm == 15) ? ppc : regs->uregs[rm];
19 long cpsr = regs->ARM_cpsr;
20
21 fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn);
22 diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
23 index e34d96a..6879cfe 100644
24 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c
25 +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
26 @@ -37,6 +37,10 @@
27 #define SYSTEM_REV_S_USES_VAUX3 0x8
28
29 static int board_keymap[] = {
30 + /*
31 + * Note that KEY(x, 8, KEY_XXX) entries represent "entrire row
32 + * connected to the ground" matrix state.
33 + */
34 KEY(0, 0, KEY_Q),
35 KEY(0, 1, KEY_O),
36 KEY(0, 2, KEY_P),
37 @@ -44,6 +48,7 @@ static int board_keymap[] = {
38 KEY(0, 4, KEY_BACKSPACE),
39 KEY(0, 6, KEY_A),
40 KEY(0, 7, KEY_S),
41 +
42 KEY(1, 0, KEY_W),
43 KEY(1, 1, KEY_D),
44 KEY(1, 2, KEY_F),
45 @@ -52,6 +57,7 @@ static int board_keymap[] = {
46 KEY(1, 5, KEY_J),
47 KEY(1, 6, KEY_K),
48 KEY(1, 7, KEY_L),
49 +
50 KEY(2, 0, KEY_E),
51 KEY(2, 1, KEY_DOT),
52 KEY(2, 2, KEY_UP),
53 @@ -59,6 +65,8 @@ static int board_keymap[] = {
54 KEY(2, 5, KEY_Z),
55 KEY(2, 6, KEY_X),
56 KEY(2, 7, KEY_C),
57 + KEY(2, 8, KEY_F9),
58 +
59 KEY(3, 0, KEY_R),
60 KEY(3, 1, KEY_V),
61 KEY(3, 2, KEY_B),
62 @@ -67,20 +75,23 @@ static int board_keymap[] = {
63 KEY(3, 5, KEY_SPACE),
64 KEY(3, 6, KEY_SPACE),
65 KEY(3, 7, KEY_LEFT),
66 +
67 KEY(4, 0, KEY_T),
68 KEY(4, 1, KEY_DOWN),
69 KEY(4, 2, KEY_RIGHT),
70 KEY(4, 4, KEY_LEFTCTRL),
71 KEY(4, 5, KEY_RIGHTALT),
72 KEY(4, 6, KEY_LEFTSHIFT),
73 + KEY(4, 8, KEY_F10),
74 +
75 KEY(5, 0, KEY_Y),
76 + KEY(5, 8, KEY_F11),
77 +
78 KEY(6, 0, KEY_U),
79 +
80 KEY(7, 0, KEY_I),
81 KEY(7, 1, KEY_F7),
82 KEY(7, 2, KEY_F8),
83 - KEY(0xff, 2, KEY_F9),
84 - KEY(0xff, 4, KEY_F10),
85 - KEY(0xff, 5, KEY_F11),
86 };
87
88 static struct matrix_keymap_data board_map_data = {
89 diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
90 index c48e1f2..6727c78 100644
91 --- a/arch/arm/mach-realview/Kconfig
92 +++ b/arch/arm/mach-realview/Kconfig
93 @@ -18,6 +18,7 @@ config REALVIEW_EB_ARM11MP
94 bool "Support ARM11MPCore tile"
95 depends on MACH_REALVIEW_EB
96 select CPU_V6
97 + select ARCH_HAS_BARRIERS if SMP
98 help
99 Enable support for the ARM11MPCore tile on the Realview platform.
100
101 @@ -35,6 +36,7 @@ config MACH_REALVIEW_PB11MP
102 select CPU_V6
103 select ARM_GIC
104 select HAVE_PATA_PLATFORM
105 + select ARCH_HAS_BARRIERS if SMP
106 help
107 Include support for the ARM(R) RealView MPCore Platform Baseboard.
108 PB11MPCore is a platform with an on-board ARM11MPCore and has
109 diff --git a/arch/arm/mach-realview/include/mach/barriers.h b/arch/arm/mach-realview/include/mach/barriers.h
110 new file mode 100644
111 index 0000000..0c5d749
112 --- /dev/null
113 +++ b/arch/arm/mach-realview/include/mach/barriers.h
114 @@ -0,0 +1,8 @@
115 +/*
116 + * Barriers redefined for RealView ARM11MPCore platforms with L220 cache
117 + * controller to work around hardware errata causing the outer_sync()
118 + * operation to deadlock the system.
119 + */
120 +#define mb() dsb()
121 +#define rmb() dmb()
122 +#define wmb() mb()
123 diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
124 index ee09d26..e2cde52 100644
125 --- a/arch/ia64/mm/tlb.c
126 +++ b/arch/ia64/mm/tlb.c
127 @@ -120,7 +120,7 @@ static inline void down_spin(struct spinaphore *ss)
128 ia64_invala();
129
130 for (;;) {
131 - asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
132 + asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
133 if (time_before(t, serve))
134 return;
135 cpu_relax();
136 diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
137 index a581d60..608dc97 100644
138 --- a/arch/mips/include/asm/mipsregs.h
139 +++ b/arch/mips/include/asm/mipsregs.h
140 @@ -135,6 +135,12 @@
141 #define FPU_CSR_COND7 0x80000000 /* $fcc7 */
142
143 /*
144 + * Bits 18 - 20 of the FPU Status Register will be read as 0,
145 + * and should be written as zero.
146 + */
147 +#define FPU_CSR_RSVD 0x001c0000
148 +
149 +/*
150 * X the exception cause indicator
151 * E the exception enable
152 * S the sticky/flag bit
153 @@ -161,7 +167,8 @@
154 #define FPU_CSR_UDF_S 0x00000008
155 #define FPU_CSR_INE_S 0x00000004
156
157 -/* rounding mode */
158 +/* Bits 0 and 1 of FPU Status Register specify the rounding mode */
159 +#define FPU_CSR_RM 0x00000003
160 #define FPU_CSR_RN 0x0 /* nearest */
161 #define FPU_CSR_RZ 0x1 /* towards zero */
162 #define FPU_CSR_RU 0x2 /* towards +Infinity */
163 diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
164 index 454b539..c15d94b 100644
165 --- a/arch/mips/math-emu/cp1emu.c
166 +++ b/arch/mips/math-emu/cp1emu.c
167 @@ -75,6 +75,9 @@ struct mips_fpu_emulator_stats fpuemustats;
168 #define FPCREG_RID 0 /* $0 = revision id */
169 #define FPCREG_CSR 31 /* $31 = csr */
170
171 +/* Determine rounding mode from the RM bits of the FCSR */
172 +#define modeindex(v) ((v) & FPU_CSR_RM)
173 +
174 /* Convert Mips rounding mode (0..3) to IEEE library modes. */
175 static const unsigned char ieee_rm[4] = {
176 [FPU_CSR_RN] = IEEE754_RN,
177 @@ -381,10 +384,14 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
178 (void *) (xcp->cp0_epc),
179 MIPSInst_RT(ir), value);
180 #endif
181 - value &= (FPU_CSR_FLUSH | FPU_CSR_ALL_E | FPU_CSR_ALL_S | 0x03);
182 - ctx->fcr31 &= ~(FPU_CSR_FLUSH | FPU_CSR_ALL_E | FPU_CSR_ALL_S | 0x03);
183 - /* convert to ieee library modes */
184 - ctx->fcr31 |= (value & ~0x3) | ieee_rm[value & 0x3];
185 +
186 + /*
187 + * Don't write reserved bits,
188 + * and convert to ieee library modes
189 + */
190 + ctx->fcr31 = (value &
191 + ~(FPU_CSR_RSVD | FPU_CSR_RM)) |
192 + ieee_rm[modeindex(value)];
193 }
194 if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
195 return SIGFPE;
196 diff --git a/arch/sparc/include/asm/stat.h b/arch/sparc/include/asm/stat.h
197 index 39327d6..a232e9e 100644
198 --- a/arch/sparc/include/asm/stat.h
199 +++ b/arch/sparc/include/asm/stat.h
200 @@ -53,8 +53,8 @@ struct stat {
201 ino_t st_ino;
202 mode_t st_mode;
203 short st_nlink;
204 - uid16_t st_uid;
205 - gid16_t st_gid;
206 + unsigned short st_uid;
207 + unsigned short st_gid;
208 unsigned short st_rdev;
209 off_t st_size;
210 time_t st_atime;
211 diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
212 index a7881c7..a7e502f 100644
213 --- a/arch/x86/include/asm/msr-index.h
214 +++ b/arch/x86/include/asm/msr-index.h
215 @@ -106,6 +106,7 @@
216 #define MSR_AMD64_PATCH_LOADER 0xc0010020
217 #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
218 #define MSR_AMD64_OSVW_STATUS 0xc0010141
219 +#define MSR_AMD64_DC_CFG 0xc0011022
220 #define MSR_AMD64_IBSFETCHCTL 0xc0011030
221 #define MSR_AMD64_IBSFETCHLINAD 0xc0011031
222 #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
223 diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
224 index 48dcfa6..fd921c3 100644
225 --- a/arch/x86/include/asm/suspend_32.h
226 +++ b/arch/x86/include/asm/suspend_32.h
227 @@ -15,6 +15,8 @@ static inline int arch_prepare_suspend(void) { return 0; }
228 struct saved_context {
229 u16 es, fs, gs, ss;
230 unsigned long cr0, cr2, cr3, cr4;
231 + u64 misc_enable;
232 + bool misc_enable_saved;
233 struct desc_ptr gdt;
234 struct desc_ptr idt;
235 u16 ldt;
236 diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
237 index 06284f4..8d942af 100644
238 --- a/arch/x86/include/asm/suspend_64.h
239 +++ b/arch/x86/include/asm/suspend_64.h
240 @@ -27,6 +27,8 @@ struct saved_context {
241 u16 ds, es, fs, gs, ss;
242 unsigned long gs_base, gs_kernel_base, fs_base;
243 unsigned long cr0, cr2, cr3, cr4, cr8;
244 + u64 misc_enable;
245 + bool misc_enable_saved;
246 unsigned long efer;
247 u16 gdt_pad;
248 u16 gdt_limit;
249 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
250 index f08f973..e0fbf29 100644
251 --- a/arch/x86/include/asm/system.h
252 +++ b/arch/x86/include/asm/system.h
253 @@ -449,7 +449,7 @@ void stop_this_cpu(void *dummy);
254 *
255 * (Could use an alternative three way for this if there was one.)
256 */
257 -static inline void rdtsc_barrier(void)
258 +static __always_inline void rdtsc_barrier(void)
259 {
260 alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
261 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
262 diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
263 index 2e837f5..fb7a5f0 100644
264 --- a/arch/x86/kernel/acpi/cstate.c
265 +++ b/arch/x86/kernel/acpi/cstate.c
266 @@ -145,6 +145,15 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
267 percpu_entry->states[cx->index].eax = cx->address;
268 percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
269 }
270 +
271 + /*
272 + * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
273 + * then we should skip checking BM_STS for this C-state.
274 + * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
275 + */
276 + if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
277 + cx->bm_sts_skip = 1;
278 +
279 return retval;
280 }
281 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
282 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
283 index 8b85734..ca93638 100644
284 --- a/arch/x86/kernel/acpi/sleep.c
285 +++ b/arch/x86/kernel/acpi/sleep.c
286 @@ -162,8 +162,6 @@ static int __init acpi_sleep_setup(char *str)
287 #endif
288 if (strncmp(str, "old_ordering", 12) == 0)
289 acpi_old_suspend_ordering();
290 - if (strncmp(str, "sci_force_enable", 16) == 0)
291 - acpi_set_sci_en_on_resume();
292 str = strchr(str, ',');
293 if (str != NULL)
294 str += strspn(str, ", \t");
295 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
296 index 168e172..bc9cd5a 100644
297 --- a/arch/x86/kernel/apic/apic.c
298 +++ b/arch/x86/kernel/apic/apic.c
299 @@ -941,7 +941,7 @@ void disable_local_APIC(void)
300 unsigned int value;
301
302 /* APIC hasn't been mapped yet */
303 - if (!apic_phys)
304 + if (!x2apic_mode && !apic_phys)
305 return;
306
307 clear_local_APIC();
308 diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
309 index 74f5a3f..19528ef 100644
310 --- a/arch/x86/kernel/hpet.c
311 +++ b/arch/x86/kernel/hpet.c
312 @@ -949,7 +949,7 @@ fs_initcall(hpet_late_init);
313
314 void hpet_disable(void)
315 {
316 - if (is_hpet_capable()) {
317 + if (is_hpet_capable() && hpet_virt_address) {
318 unsigned long cfg = hpet_readl(HPET_CFG);
319
320 if (hpet_legacy_int_enabled) {
321 diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
322 index e6ec8a2..1a2d4b1 100644
323 --- a/arch/x86/kernel/pci-calgary_64.c
324 +++ b/arch/x86/kernel/pci-calgary_64.c
325 @@ -102,11 +102,16 @@ int use_calgary __read_mostly = 0;
326 #define PMR_SOFTSTOPFAULT 0x40000000
327 #define PMR_HARDSTOP 0x20000000
328
329 -#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */
330 -#define MAX_NUM_CHASSIS 8 /* max number of chassis */
331 -/* MAX_PHB_BUS_NUM is the maximal possible dev->bus->number */
332 -#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2)
333 -#define PHBS_PER_CALGARY 4
334 +/*
335 + * The maximum PHB bus number.
336 + * x3950M2 (rare): 8 chassis, 48 PHBs per chassis = 384
337 + * x3950M2: 4 chassis, 48 PHBs per chassis = 192
338 + * x3950 (PCIE): 8 chassis, 32 PHBs per chassis = 256
339 + * x3950 (PCIX): 8 chassis, 16 PHBs per chassis = 128
340 + */
341 +#define MAX_PHB_BUS_NUM 256
342 +
343 +#define PHBS_PER_CALGARY 4
344
345 /* register offsets in Calgary's internal register space */
346 static const unsigned long tar_offsets[] = {
347 @@ -1053,8 +1058,6 @@ static int __init calgary_init_one(struct pci_dev *dev)
348 struct iommu_table *tbl;
349 int ret;
350
351 - BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM);
352 -
353 bbar = busno_to_bbar(dev->bus->number);
354 ret = calgary_setup_tar(dev, bbar);
355 if (ret)
356 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
357 index a265626..fdf2e28 100644
358 --- a/arch/x86/kvm/mmu.c
359 +++ b/arch/x86/kvm/mmu.c
360 @@ -1843,6 +1843,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
361
362 spte |= PT_WRITABLE_MASK;
363
364 + if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK))
365 + spte &= ~PT_USER_MASK;
366 +
367 /*
368 * Optimization: for pte sync, if spte was writable the hash
369 * lookup is unnecessary (and expensive). Write protection
370 @@ -1898,6 +1901,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
371
372 child = page_header(pte & PT64_BASE_ADDR_MASK);
373 mmu_page_remove_parent_pte(child, sptep);
374 + __set_spte(sptep, shadow_trap_nonpresent_pte);
375 + kvm_flush_remote_tlbs(vcpu->kvm);
376 } else if (pfn != spte_to_pfn(*sptep)) {
377 pgprintk("hfn old %lx new %lx\n",
378 spte_to_pfn(*sptep), pfn);
379 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
380 index 8e65552..61ba669 100644
381 --- a/arch/x86/kvm/svm.c
382 +++ b/arch/x86/kvm/svm.c
383 @@ -27,6 +27,7 @@
384 #include <linux/sched.h>
385 #include <linux/ftrace_event.h>
386
387 +#include <asm/tlbflush.h>
388 #include <asm/desc.h>
389
390 #include <asm/virtext.h>
391 @@ -62,6 +63,8 @@ MODULE_LICENSE("GPL");
392 #define nsvm_printk(fmt, args...) do {} while(0)
393 #endif
394
395 +static bool erratum_383_found __read_mostly;
396 +
397 static const u32 host_save_user_msrs[] = {
398 #ifdef CONFIG_X86_64
399 MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
400 @@ -299,6 +302,31 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
401 svm_set_interrupt_shadow(vcpu, 0);
402 }
403
404 +static void svm_init_erratum_383(void)
405 +{
406 + u32 low, high;
407 + int err;
408 + u64 val;
409 +
410 + /* Only Fam10h is affected */
411 + if (boot_cpu_data.x86 != 0x10)
412 + return;
413 +
414 + /* Use _safe variants to not break nested virtualization */
415 + val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
416 + if (err)
417 + return;
418 +
419 + val |= (1ULL << 47);
420 +
421 + low = lower_32_bits(val);
422 + high = upper_32_bits(val);
423 +
424 + native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
425 +
426 + erratum_383_found = true;
427 +}
428 +
429 static int has_svm(void)
430 {
431 const char *msg;
432 @@ -318,7 +346,6 @@ static void svm_hardware_disable(void *garbage)
433
434 static void svm_hardware_enable(void *garbage)
435 {
436 -
437 struct svm_cpu_data *svm_data;
438 uint64_t efer;
439 struct descriptor_table gdt_descr;
440 @@ -350,6 +377,10 @@ static void svm_hardware_enable(void *garbage)
441
442 wrmsrl(MSR_VM_HSAVE_PA,
443 page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
444 +
445 + svm_init_erratum_383();
446 +
447 + return;
448 }
449
450 static void svm_cpu_uninit(int cpu)
451 @@ -1257,8 +1288,59 @@ static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
452 return 1;
453 }
454
455 -static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
456 +static bool is_erratum_383(void)
457 {
458 + int err, i;
459 + u64 value;
460 +
461 + if (!erratum_383_found)
462 + return false;
463 +
464 + value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
465 + if (err)
466 + return false;
467 +
468 + /* Bit 62 may or may not be set for this mce */
469 + value &= ~(1ULL << 62);
470 +
471 + if (value != 0xb600000000010015ULL)
472 + return false;
473 +
474 + /* Clear MCi_STATUS registers */
475 + for (i = 0; i < 6; ++i)
476 + native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
477 +
478 + value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
479 + if (!err) {
480 + u32 low, high;
481 +
482 + value &= ~(1ULL << 2);
483 + low = lower_32_bits(value);
484 + high = upper_32_bits(value);
485 +
486 + native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
487 + }
488 +
489 + /* Flush tlb to evict multi-match entries */
490 + __flush_tlb_all();
491 +
492 + return true;
493 +}
494 +
495 +static void svm_handle_mce(struct vcpu_svm *svm)
496 +{
497 + if (is_erratum_383()) {
498 + /*
499 + * Erratum 383 triggered. Guest state is corrupt so kill the
500 + * guest.
501 + */
502 + pr_err("KVM: Guest triggered AMD Erratum 383\n");
503 +
504 + set_bit(KVM_REQ_TRIPLE_FAULT, &svm->vcpu.requests);
505 +
506 + return;
507 + }
508 +
509 /*
510 * On an #MC intercept the MCE handler is not called automatically in
511 * the host. So do it by hand here.
512 @@ -1267,6 +1349,11 @@ static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
513 "int $0x12\n");
514 /* not sure if we ever come back to this point */
515
516 + return;
517 +}
518 +
519 +static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
520 +{
521 return 1;
522 }
523
524 @@ -2717,6 +2804,14 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
525 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
526 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
527 }
528 +
529 + /*
530 + * We need to handle MC intercepts here before the vcpu has a chance to
531 + * change the physical cpu
532 + */
533 + if (unlikely(svm->vmcb->control.exit_code ==
534 + SVM_EXIT_EXCP_BASE + MC_VECTOR))
535 + svm_handle_mce(svm);
536 }
537
538 #undef R
539 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
540 index 8aa85f1..eeeb522 100644
541 --- a/arch/x86/power/cpu.c
542 +++ b/arch/x86/power/cpu.c
543 @@ -104,6 +104,8 @@ static void __save_processor_state(struct saved_context *ctxt)
544 ctxt->cr4 = read_cr4();
545 ctxt->cr8 = read_cr8();
546 #endif
547 + ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
548 + &ctxt->misc_enable);
549 }
550
551 /* Needed by apm.c */
552 @@ -176,6 +178,8 @@ static void fix_processor_context(void)
553 */
554 static void __restore_processor_state(struct saved_context *ctxt)
555 {
556 + if (ctxt->misc_enable_saved)
557 + wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
558 /*
559 * control registers
560 */
561 diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
562 index c8f0797..a6ad608 100644
563 --- a/drivers/acpi/processor_idle.c
564 +++ b/drivers/acpi/processor_idle.c
565 @@ -962,7 +962,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
566 if (acpi_idle_suspend)
567 return(acpi_idle_enter_c1(dev, state));
568
569 - if (acpi_idle_bm_check()) {
570 + if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
571 if (dev->safe_state) {
572 dev->last_state = dev->safe_state;
573 return dev->safe_state->enter(dev, dev->safe_state);
574 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
575 index 9ed9292..0458094 100644
576 --- a/drivers/acpi/sleep.c
577 +++ b/drivers/acpi/sleep.c
578 @@ -80,22 +80,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
579
580 #ifdef CONFIG_ACPI_SLEEP
581 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
582 -/*
583 - * According to the ACPI specification the BIOS should make sure that ACPI is
584 - * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
585 - * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
586 - * on such systems during resume. Unfortunately that doesn't help in
587 - * particularly pathological cases in which SCI_EN has to be set directly on
588 - * resume, although the specification states very clearly that this flag is
589 - * owned by the hardware. The set_sci_en_on_resume variable will be set in such
590 - * cases.
591 - */
592 -static bool set_sci_en_on_resume;
593 -
594 -void __init acpi_set_sci_en_on_resume(void)
595 -{
596 - set_sci_en_on_resume = true;
597 -}
598
599 /*
600 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
601 @@ -253,11 +237,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
602 break;
603 }
604
605 - /* If ACPI is not enabled by the BIOS, we need to enable it here. */
606 - if (set_sci_en_on_resume)
607 - acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
608 - else
609 - acpi_enable();
610 + /* This violates the spec but is required for bug compatibility. */
611 + acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
612
613 /* Reprogram control registers and execute _BFS */
614 acpi_leave_sleep_state_prep(acpi_state);
615 @@ -346,12 +327,6 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
616 return 0;
617 }
618
619 -static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
620 -{
621 - set_sci_en_on_resume = true;
622 - return 0;
623 -}
624 -
625 static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
626 {
627 .callback = init_old_suspend_ordering,
628 @@ -370,22 +345,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
629 },
630 },
631 {
632 - .callback = init_set_sci_en_on_resume,
633 - .ident = "Apple MacBook 1,1",
634 - .matches = {
635 - DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
636 - DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
637 - },
638 - },
639 - {
640 - .callback = init_set_sci_en_on_resume,
641 - .ident = "Apple MacMini 1,1",
642 - .matches = {
643 - DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
644 - DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
645 - },
646 - },
647 - {
648 .callback = init_old_suspend_ordering,
649 .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
650 .matches = {
651 @@ -394,94 +353,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
652 },
653 },
654 {
655 - .callback = init_set_sci_en_on_resume,
656 - .ident = "Toshiba Satellite L300",
657 - .matches = {
658 - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
659 - DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
660 - },
661 - },
662 - {
663 - .callback = init_set_sci_en_on_resume,
664 - .ident = "Hewlett-Packard HP G7000 Notebook PC",
665 - .matches = {
666 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
667 - DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"),
668 - },
669 - },
670 - {
671 - .callback = init_set_sci_en_on_resume,
672 - .ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC",
673 - .matches = {
674 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
675 - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"),
676 - },
677 - },
678 - {
679 - .callback = init_set_sci_en_on_resume,
680 - .ident = "Hewlett-Packard Pavilion dv4",
681 - .matches = {
682 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
683 - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"),
684 - },
685 - },
686 - {
687 - .callback = init_set_sci_en_on_resume,
688 - .ident = "Hewlett-Packard Pavilion dv7",
689 - .matches = {
690 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
691 - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"),
692 - },
693 - },
694 - {
695 - .callback = init_set_sci_en_on_resume,
696 - .ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
697 - .matches = {
698 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
699 - DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
700 - },
701 - },
702 - {
703 - .callback = init_set_sci_en_on_resume,
704 - .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
705 - .matches = {
706 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
707 - DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"),
708 - },
709 - },
710 - {
711 - .callback = init_set_sci_en_on_resume,
712 - .ident = "Lenovo ThinkPad T410",
713 - .matches = {
714 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
715 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
716 - },
717 - },
718 - {
719 - .callback = init_set_sci_en_on_resume,
720 - .ident = "Lenovo ThinkPad T510",
721 - .matches = {
722 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
723 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
724 - },
725 - },
726 - {
727 - .callback = init_set_sci_en_on_resume,
728 - .ident = "Lenovo ThinkPad W510",
729 - .matches = {
730 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
731 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
732 - },
733 - },
734 - {
735 - .callback = init_set_sci_en_on_resume,
736 - .ident = "Lenovo ThinkPad X201[s]",
737 - .matches = {
738 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
739 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
740 - },
741 - },
742 - {
743 .callback = init_old_suspend_ordering,
744 .ident = "Panasonic CF51-2L",
745 .matches = {
746 @@ -490,30 +361,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
747 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
748 },
749 },
750 - {
751 - .callback = init_set_sci_en_on_resume,
752 - .ident = "Dell Studio 1558",
753 - .matches = {
754 - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
755 - DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"),
756 - },
757 - },
758 - {
759 - .callback = init_set_sci_en_on_resume,
760 - .ident = "Dell Studio 1557",
761 - .matches = {
762 - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
763 - DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
764 - },
765 - },
766 - {
767 - .callback = init_set_sci_en_on_resume,
768 - .ident = "Dell Studio 1555",
769 - .matches = {
770 - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
771 - DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"),
772 - },
773 - },
774 {},
775 };
776 #endif /* CONFIG_SUSPEND */
777 diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
778 index 7376367..e0e6570 100644
779 --- a/drivers/base/firmware_class.c
780 +++ b/drivers/base/firmware_class.c
781 @@ -125,6 +125,17 @@ static ssize_t firmware_loading_show(struct device *dev,
782 return sprintf(buf, "%d\n", loading);
783 }
784
785 +static void firmware_free_data(const struct firmware *fw)
786 +{
787 + int i;
788 + vunmap(fw->data);
789 + if (fw->pages) {
790 + for (i = 0; i < PFN_UP(fw->size); i++)
791 + __free_page(fw->pages[i]);
792 + kfree(fw->pages);
793 + }
794 +}
795 +
796 /* Some architectures don't have PAGE_KERNEL_RO */
797 #ifndef PAGE_KERNEL_RO
798 #define PAGE_KERNEL_RO PAGE_KERNEL
799 @@ -157,21 +168,21 @@ static ssize_t firmware_loading_store(struct device *dev,
800 mutex_unlock(&fw_lock);
801 break;
802 }
803 - vfree(fw_priv->fw->data);
804 - fw_priv->fw->data = NULL;
805 + firmware_free_data(fw_priv->fw);
806 + memset(fw_priv->fw, 0, sizeof(struct firmware));
807 + /* If the pages are not owned by 'struct firmware' */
808 for (i = 0; i < fw_priv->nr_pages; i++)
809 __free_page(fw_priv->pages[i]);
810 kfree(fw_priv->pages);
811 fw_priv->pages = NULL;
812 fw_priv->page_array_size = 0;
813 fw_priv->nr_pages = 0;
814 - fw_priv->fw->size = 0;
815 set_bit(FW_STATUS_LOADING, &fw_priv->status);
816 mutex_unlock(&fw_lock);
817 break;
818 case 0:
819 if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
820 - vfree(fw_priv->fw->data);
821 + vunmap(fw_priv->fw->data);
822 fw_priv->fw->data = vmap(fw_priv->pages,
823 fw_priv->nr_pages,
824 0, PAGE_KERNEL_RO);
825 @@ -179,7 +190,10 @@ static ssize_t firmware_loading_store(struct device *dev,
826 dev_err(dev, "%s: vmap() failed\n", __func__);
827 goto err;
828 }
829 - /* Pages will be freed by vfree() */
830 + /* Pages are now owned by 'struct firmware' */
831 + fw_priv->fw->pages = fw_priv->pages;
832 + fw_priv->pages = NULL;
833 +
834 fw_priv->page_array_size = 0;
835 fw_priv->nr_pages = 0;
836 complete(&fw_priv->completion);
837 @@ -572,7 +586,7 @@ release_firmware(const struct firmware *fw)
838 if (fw->data == builtin->data)
839 goto free_fw;
840 }
841 - vfree(fw->data);
842 + firmware_free_data(fw);
843 free_fw:
844 kfree(fw);
845 }
846 diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
847 index 2fb2e6c..c496c8a 100644
848 --- a/drivers/char/agp/amd64-agp.c
849 +++ b/drivers/char/agp/amd64-agp.c
850 @@ -499,6 +499,10 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
851 u8 cap_ptr;
852 int err;
853
854 + /* The Highlander principle */
855 + if (agp_bridges_found)
856 + return -ENODEV;
857 +
858 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
859 if (!cap_ptr)
860 return -ENODEV;
861 @@ -562,6 +566,8 @@ static void __devexit agp_amd64_remove(struct pci_dev *pdev)
862 amd64_aperture_sizes[bridge->aperture_size_idx].size);
863 agp_remove_bridge(bridge);
864 agp_put_bridge(bridge);
865 +
866 + agp_bridges_found--;
867 }
868
869 #ifdef CONFIG_PM
870 @@ -709,6 +715,11 @@ static struct pci_device_id agp_amd64_pci_table[] = {
871
872 MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
873
874 +static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
875 + { PCI_DEVICE_CLASS(0, 0) },
876 + { }
877 +};
878 +
879 static struct pci_driver agp_amd64_pci_driver = {
880 .name = "agpgart-amd64",
881 .id_table = agp_amd64_pci_table,
882 @@ -733,7 +744,6 @@ int __init agp_amd64_init(void)
883 return err;
884
885 if (agp_bridges_found == 0) {
886 - struct pci_dev *dev;
887 if (!agp_try_unsupported && !agp_try_unsupported_boot) {
888 printk(KERN_INFO PFX "No supported AGP bridge found.\n");
889 #ifdef MODULE
890 @@ -749,17 +759,10 @@ int __init agp_amd64_init(void)
891 return -ENODEV;
892
893 /* Look for any AGP bridge */
894 - dev = NULL;
895 - err = -ENODEV;
896 - for_each_pci_dev(dev) {
897 - if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
898 - continue;
899 - /* Only one bridge supported right now */
900 - if (agp_amd64_probe(dev, NULL) == 0) {
901 - err = 0;
902 - break;
903 - }
904 - }
905 + agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
906 + err = driver_attach(&agp_amd64_pci_driver.driver);
907 + if (err == 0 && agp_bridges_found == 0)
908 + err = -ENODEV;
909 }
910 return err;
911 }
912 diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
913 index 8e00b4d..792868d 100644
914 --- a/drivers/char/tpm/tpm.h
915 +++ b/drivers/char/tpm/tpm.h
916 @@ -224,6 +224,7 @@ struct tpm_readpubek_params_out {
917 u8 algorithm[4];
918 u8 encscheme[2];
919 u8 sigscheme[2];
920 + __be32 paramsize;
921 u8 parameters[12]; /*assuming RSA*/
922 __be32 keysize;
923 u8 modulus[256];
924 diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
925 index 2405f17..ca15c04 100644
926 --- a/drivers/char/tpm/tpm_tis.c
927 +++ b/drivers/char/tpm/tpm_tis.c
928 @@ -622,7 +622,14 @@ static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
929
930 static int tpm_tis_pnp_resume(struct pnp_dev *dev)
931 {
932 - return tpm_pm_resume(&dev->dev);
933 + struct tpm_chip *chip = pnp_get_drvdata(dev);
934 + int ret;
935 +
936 + ret = tpm_pm_resume(&dev->dev);
937 + if (!ret)
938 + tpm_continue_selftest(chip);
939 +
940 + return ret;
941 }
942
943 static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
944 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
945 index ff57c40..c18e65e 100644
946 --- a/drivers/cpufreq/cpufreq.c
947 +++ b/drivers/cpufreq/cpufreq.c
948 @@ -1741,17 +1741,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
949 dprintk("governor switch\n");
950
951 /* end old governor */
952 - if (data->governor) {
953 - /*
954 - * Need to release the rwsem around governor
955 - * stop due to lock dependency between
956 - * cancel_delayed_work_sync and the read lock
957 - * taken in the delayed work handler.
958 - */
959 - unlock_policy_rwsem_write(data->cpu);
960 + if (data->governor)
961 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
962 - lock_policy_rwsem_write(data->cpu);
963 - }
964
965 /* start new governor */
966 data->governor = policy->governor;
967 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
968 index 7f436ec..5449239 100644
969 --- a/drivers/gpu/drm/i915/i915_drv.c
970 +++ b/drivers/gpu/drm/i915/i915_drv.c
971 @@ -192,6 +192,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
972 }
973 } else {
974 DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
975 + mutex_unlock(&dev->struct_mutex);
976 return -ENODEV;
977 }
978
979 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
980 index df6f7c9..952c844 100644
981 --- a/drivers/gpu/drm/i915/i915_gem.c
982 +++ b/drivers/gpu/drm/i915/i915_gem.c
983 @@ -4697,6 +4697,16 @@ i915_gem_load(struct drm_device *dev)
984 list_add(&dev_priv->mm.shrink_list, &shrink_list);
985 spin_unlock(&shrink_list_lock);
986
987 + /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
988 + if (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
989 + u32 tmp = I915_READ(MI_ARB_STATE);
990 + if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
991 + /* arb state is a masked write, so set bit + bit in mask */
992 + tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
993 + I915_WRITE(MI_ARB_STATE, tmp);
994 + }
995 + }
996 +
997 /* Old X drivers will take 0-2 for front, back, depth buffers */
998 dev_priv->fence_reg_start = 3;
999
1000 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1001 index 73e7ec0..7214c85 100644
1002 --- a/drivers/gpu/drm/i915/i915_reg.h
1003 +++ b/drivers/gpu/drm/i915/i915_reg.h
1004 @@ -307,6 +307,70 @@
1005 #define LM_BURST_LENGTH 0x00000700
1006 #define LM_FIFO_WATERMARK 0x0000001F
1007 #define MI_ARB_STATE 0x020e4 /* 915+ only */
1008 +#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
1009 +
1010 +/* Make render/texture TLB fetches lower priorty than associated data
1011 + * fetches. This is not turned on by default
1012 + */
1013 +#define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15)
1014 +
1015 +/* Isoch request wait on GTT enable (Display A/B/C streams).
1016 + * Make isoch requests stall on the TLB update. May cause
1017 + * display underruns (test mode only)
1018 + */
1019 +#define MI_ARB_ISOCH_WAIT_GTT (1 << 14)
1020 +
1021 +/* Block grant count for isoch requests when block count is
1022 + * set to a finite value.
1023 + */
1024 +#define MI_ARB_BLOCK_GRANT_MASK (3 << 12)
1025 +#define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */
1026 +#define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */
1027 +#define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */
1028 +#define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */
1029 +
1030 +/* Enable render writes to complete in C2/C3/C4 power states.
1031 + * If this isn't enabled, render writes are prevented in low
1032 + * power states. That seems bad to me.
1033 + */
1034 +#define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11)
1035 +
1036 +/* This acknowledges an async flip immediately instead
1037 + * of waiting for 2TLB fetches.
1038 + */
1039 +#define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10)
1040 +
1041 +/* Enables non-sequential data reads through arbiter
1042 + */
1043 +#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9)
1044 +
1045 +/* Disable FSB snooping of cacheable write cycles from binner/render
1046 + * command stream
1047 + */
1048 +#define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8)
1049 +
1050 +/* Arbiter time slice for non-isoch streams */
1051 +#define MI_ARB_TIME_SLICE_MASK (7 << 5)
1052 +#define MI_ARB_TIME_SLICE_1 (0 << 5)
1053 +#define MI_ARB_TIME_SLICE_2 (1 << 5)
1054 +#define MI_ARB_TIME_SLICE_4 (2 << 5)
1055 +#define MI_ARB_TIME_SLICE_6 (3 << 5)
1056 +#define MI_ARB_TIME_SLICE_8 (4 << 5)
1057 +#define MI_ARB_TIME_SLICE_10 (5 << 5)
1058 +#define MI_ARB_TIME_SLICE_14 (6 << 5)
1059 +#define MI_ARB_TIME_SLICE_16 (7 << 5)
1060 +
1061 +/* Low priority grace period page size */
1062 +#define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */
1063 +#define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4)
1064 +
1065 +/* Disable display A/B trickle feed */
1066 +#define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2)
1067 +
1068 +/* Set display plane priority */
1069 +#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */
1070 +#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
1071 +
1072 #define CACHE_MODE_0 0x02120 /* 915+ only */
1073 #define CM0_MASK_SHIFT 16
1074 #define CM0_IZ_OPT_DISABLE (1<<6)
1075 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1076 index cce453e..88d5e3a 100644
1077 --- a/drivers/gpu/drm/i915/intel_display.c
1078 +++ b/drivers/gpu/drm/i915/intel_display.c
1079 @@ -785,8 +785,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
1080 intel_clock_t clock;
1081 int max_n;
1082 bool found;
1083 - /* approximately equals target * 0.00488 */
1084 - int err_most = (target >> 8) + (target >> 10);
1085 + /* approximately equals target * 0.00585 */
1086 + int err_most = (target >> 8) + (target >> 9);
1087 found = false;
1088
1089 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
1090 diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
1091 index eb740fc..ccf42c3 100644
1092 --- a/drivers/gpu/drm/radeon/r200.c
1093 +++ b/drivers/gpu/drm/radeon/r200.c
1094 @@ -368,6 +368,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
1095 /* 2D, 3D, CUBE */
1096 switch (tmp) {
1097 case 0:
1098 + case 3:
1099 + case 4:
1100 case 5:
1101 case 6:
1102 case 7:
1103 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
1104 index a4813c6..e5e22b1 100644
1105 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
1106 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
1107 @@ -161,6 +161,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
1108 }
1109 }
1110
1111 + /* ASUS HD 3600 board lists the DVI port as HDMI */
1112 + if ((dev->pdev->device == 0x9598) &&
1113 + (dev->pdev->subsystem_vendor == 0x1043) &&
1114 + (dev->pdev->subsystem_device == 0x01e4)) {
1115 + if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
1116 + *connector_type = DRM_MODE_CONNECTOR_DVII;
1117 + }
1118 + }
1119 +
1120 /* ASUS HD 3450 board lists the DVI port as HDMI */
1121 if ((dev->pdev->device == 0x95C5) &&
1122 (dev->pdev->subsystem_vendor == 0x1043) &&
1123 diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
1124 index 0038212..183bef8 100644
1125 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
1126 +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
1127 @@ -89,6 +89,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
1128 udelay(panel_pwr_delay * 1000);
1129 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
1130 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
1131 + udelay(panel_pwr_delay * 1000);
1132 break;
1133 }
1134
1135 diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
1136 index cb4290a..5852191 100644
1137 --- a/drivers/hwmon/coretemp.c
1138 +++ b/drivers/hwmon/coretemp.c
1139 @@ -53,6 +53,7 @@ struct coretemp_data {
1140 struct mutex update_lock;
1141 const char *name;
1142 u32 id;
1143 + u16 core_id;
1144 char valid; /* zero until following fields are valid */
1145 unsigned long last_updated; /* in jiffies */
1146 int temp;
1147 @@ -75,7 +76,7 @@ static ssize_t show_name(struct device *dev, struct device_attribute
1148 if (attr->index == SHOW_NAME)
1149 ret = sprintf(buf, "%s\n", data->name);
1150 else /* show label */
1151 - ret = sprintf(buf, "Core %d\n", data->id);
1152 + ret = sprintf(buf, "Core %d\n", data->core_id);
1153 return ret;
1154 }
1155
1156 @@ -255,6 +256,9 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
1157 }
1158
1159 data->id = pdev->id;
1160 +#ifdef CONFIG_SMP
1161 + data->core_id = c->cpu_core_id;
1162 +#endif
1163 data->name = "coretemp";
1164 mutex_init(&data->update_lock);
1165
1166 @@ -352,6 +356,10 @@ struct pdev_entry {
1167 struct list_head list;
1168 struct platform_device *pdev;
1169 unsigned int cpu;
1170 +#ifdef CONFIG_SMP
1171 + u16 phys_proc_id;
1172 + u16 cpu_core_id;
1173 +#endif
1174 };
1175
1176 static LIST_HEAD(pdev_list);
1177 @@ -362,6 +370,22 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
1178 int err;
1179 struct platform_device *pdev;
1180 struct pdev_entry *pdev_entry;
1181 +#ifdef CONFIG_SMP
1182 + struct cpuinfo_x86 *c = &cpu_data(cpu);
1183 +#endif
1184 +
1185 + mutex_lock(&pdev_list_mutex);
1186 +
1187 +#ifdef CONFIG_SMP
1188 + /* Skip second HT entry of each core */
1189 + list_for_each_entry(pdev_entry, &pdev_list, list) {
1190 + if (c->phys_proc_id == pdev_entry->phys_proc_id &&
1191 + c->cpu_core_id == pdev_entry->cpu_core_id) {
1192 + err = 0; /* Not an error */
1193 + goto exit;
1194 + }
1195 + }
1196 +#endif
1197
1198 pdev = platform_device_alloc(DRVNAME, cpu);
1199 if (!pdev) {
1200 @@ -385,7 +409,10 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
1201
1202 pdev_entry->pdev = pdev;
1203 pdev_entry->cpu = cpu;
1204 - mutex_lock(&pdev_list_mutex);
1205 +#ifdef CONFIG_SMP
1206 + pdev_entry->phys_proc_id = c->phys_proc_id;
1207 + pdev_entry->cpu_core_id = c->cpu_core_id;
1208 +#endif
1209 list_add_tail(&pdev_entry->list, &pdev_list);
1210 mutex_unlock(&pdev_list_mutex);
1211
1212 @@ -396,6 +423,7 @@ exit_device_free:
1213 exit_device_put:
1214 platform_device_put(pdev);
1215 exit:
1216 + mutex_unlock(&pdev_list_mutex);
1217 return err;
1218 }
1219
1220 diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
1221 index a3749cb..497476f 100644
1222 --- a/drivers/hwmon/it87.c
1223 +++ b/drivers/hwmon/it87.c
1224 @@ -80,6 +80,13 @@ superio_inb(int reg)
1225 return inb(VAL);
1226 }
1227
1228 +static inline void
1229 +superio_outb(int reg, int val)
1230 +{
1231 + outb(reg, REG);
1232 + outb(val, VAL);
1233 +}
1234 +
1235 static int superio_inw(int reg)
1236 {
1237 int val;
1238 @@ -1036,6 +1043,21 @@ static int __init it87_find(unsigned short *address,
1239 sio_data->vid_value = superio_inb(IT87_SIO_VID_REG);
1240
1241 reg = superio_inb(IT87_SIO_PINX2_REG);
1242 + /*
1243 + * The IT8720F has no VIN7 pin, so VCCH should always be
1244 + * routed internally to VIN7 with an internal divider.
1245 + * Curiously, there still is a configuration bit to control
1246 + * this, which means it can be set incorrectly. And even
1247 + * more curiously, many boards out there are improperly
1248 + * configured, even though the IT8720F datasheet claims
1249 + * that the internal routing of VCCH to VIN7 is the default
1250 + * setting. So we force the internal routing in this case.
1251 + */
1252 + if (sio_data->type == it8720 && !(reg & (1 << 1))) {
1253 + reg |= (1 << 1);
1254 + superio_outb(IT87_SIO_PINX2_REG, reg);
1255 + pr_notice("it87: Routing internal VCCH to in7\n");
1256 + }
1257 if (reg & (1 << 0))
1258 pr_info("it87: in3 is VCC (+5V)\n");
1259 if (reg & (1 << 1))
1260 diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
1261 index 1fe9951..f808d18 100644
1262 --- a/drivers/hwmon/k8temp.c
1263 +++ b/drivers/hwmon/k8temp.c
1264 @@ -120,7 +120,7 @@ static ssize_t show_temp(struct device *dev,
1265 int temp;
1266 struct k8temp_data *data = k8temp_update_device(dev);
1267
1268 - if (data->swap_core_select)
1269 + if (data->swap_core_select && (data->sensorsp & SEL_CORE))
1270 core = core ? 0 : 1;
1271
1272 temp = TEMP_FROM_REG(data->temp[core][place]) + data->temp_offset;
1273 @@ -180,11 +180,13 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
1274 }
1275
1276 if ((model >= 0x69) &&
1277 - !(model == 0xc1 || model == 0x6c || model == 0x7c)) {
1278 + !(model == 0xc1 || model == 0x6c || model == 0x7c ||
1279 + model == 0x6b || model == 0x6f || model == 0x7f)) {
1280 /*
1281 - * RevG desktop CPUs (i.e. no socket S1G1 parts)
1282 - * need additional offset, otherwise reported
1283 - * temperature is below ambient temperature
1284 + * RevG desktop CPUs (i.e. no socket S1G1 or
1285 + * ASB1 parts) need additional offset,
1286 + * otherwise reported temperature is below
1287 + * ambient temperature
1288 */
1289 data->temp_offset = 21000;
1290 }
1291 diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
1292 index 1a32d62..a9c3313 100644
1293 --- a/drivers/ide/cmd640.c
1294 +++ b/drivers/ide/cmd640.c
1295 @@ -632,12 +632,10 @@ static void cmd640_init_dev(ide_drive_t *drive)
1296
1297 static int cmd640_test_irq(ide_hwif_t *hwif)
1298 {
1299 - struct pci_dev *dev = to_pci_dev(hwif->dev);
1300 int irq_reg = hwif->channel ? ARTTIM23 : CFR;
1301 - u8 irq_stat, irq_mask = hwif->channel ? ARTTIM23_IDE23INTR :
1302 + u8 irq_mask = hwif->channel ? ARTTIM23_IDE23INTR :
1303 CFR_IDE01INTR;
1304 -
1305 - pci_read_config_byte(dev, irq_reg, &irq_stat);
1306 + u8 irq_stat = get_cmd640_reg(irq_reg);
1307
1308 return (irq_stat & irq_mask) ? 1 : 0;
1309 }
1310 diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
1311 index cc8633c..67fb735 100644
1312 --- a/drivers/ide/ide-taskfile.c
1313 +++ b/drivers/ide/ide-taskfile.c
1314 @@ -428,13 +428,11 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
1315 {
1316 struct request *rq;
1317 int error;
1318 + int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;
1319
1320 - rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
1321 + rq = blk_get_request(drive->queue, rw, __GFP_WAIT);
1322 rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
1323
1324 - if (cmd->tf_flags & IDE_TFLAG_WRITE)
1325 - rq->cmd_flags |= REQ_RW;
1326 -
1327 /*
1328 * (ks) We transfer currently only whole sectors.
1329 * This is suffient for now. But, it would be great,
1330 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
1331 index df3eb8c..b4b2257 100644
1332 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
1333 +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
1334 @@ -1163,7 +1163,7 @@ static ssize_t create_child(struct device *dev,
1335
1336 return ret ? ret : count;
1337 }
1338 -static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);
1339 +static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
1340
1341 static ssize_t delete_child(struct device *dev,
1342 struct device_attribute *attr,
1343 @@ -1183,7 +1183,7 @@ static ssize_t delete_child(struct device *dev,
1344 return ret ? ret : count;
1345
1346 }
1347 -static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);
1348 +static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
1349
1350 int ipoib_add_pkey_attr(struct net_device *dev)
1351 {
1352 diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
1353 index 9a2977c..2cfbc17 100644
1354 --- a/drivers/input/keyboard/twl4030_keypad.c
1355 +++ b/drivers/input/keyboard/twl4030_keypad.c
1356 @@ -50,8 +50,12 @@
1357 */
1358 #define TWL4030_MAX_ROWS 8 /* TWL4030 hard limit */
1359 #define TWL4030_MAX_COLS 8
1360 -#define TWL4030_ROW_SHIFT 3
1361 -#define TWL4030_KEYMAP_SIZE (TWL4030_MAX_ROWS * TWL4030_MAX_COLS)
1362 +/*
1363 + * Note that we add space for an extra column so that we can handle
1364 + * row lines connected to the gnd (see twl4030_col_xlate()).
1365 + */
1366 +#define TWL4030_ROW_SHIFT 4
1367 +#define TWL4030_KEYMAP_SIZE (TWL4030_MAX_ROWS << TWL4030_ROW_SHIFT)
1368
1369 struct twl4030_keypad {
1370 unsigned short keymap[TWL4030_KEYMAP_SIZE];
1371 @@ -181,7 +185,7 @@ static int twl4030_read_kp_matrix_state(struct twl4030_keypad *kp, u16 *state)
1372 return ret;
1373 }
1374
1375 -static int twl4030_is_in_ghost_state(struct twl4030_keypad *kp, u16 *key_state)
1376 +static bool twl4030_is_in_ghost_state(struct twl4030_keypad *kp, u16 *key_state)
1377 {
1378 int i;
1379 u16 check = 0;
1380 @@ -190,12 +194,12 @@ static int twl4030_is_in_ghost_state(struct twl4030_keypad *kp, u16 *key_state)
1381 u16 col = key_state[i];
1382
1383 if ((col & check) && hweight16(col) > 1)
1384 - return 1;
1385 + return true;
1386
1387 check |= col;
1388 }
1389
1390 - return 0;
1391 + return false;
1392 }
1393
1394 static void twl4030_kp_scan(struct twl4030_keypad *kp, bool release_all)
1395 @@ -224,7 +228,8 @@ static void twl4030_kp_scan(struct twl4030_keypad *kp, bool release_all)
1396 if (!changed)
1397 continue;
1398
1399 - for (col = 0; col < kp->n_cols; col++) {
1400 + /* Extra column handles "all gnd" rows */
1401 + for (col = 0; col < kp->n_cols + 1; col++) {
1402 int code;
1403
1404 if (!(changed & (1 << col)))
1405 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
1406 index 525b9b9..ba09e4d 100644
1407 --- a/drivers/input/serio/i8042-x86ia64io.h
1408 +++ b/drivers/input/serio/i8042-x86ia64io.h
1409 @@ -166,6 +166,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
1410 },
1411 },
1412 {
1413 + /* Gigabyte Spring Peak - defines wrong chassis type */
1414 + .matches = {
1415 + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
1416 + DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
1417 + },
1418 + },
1419 + {
1420 .matches = {
1421 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1422 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
1423 diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
1424 index dddfc46..55e591d 100644
1425 --- a/drivers/media/dvb/dvb-core/dvb_net.c
1426 +++ b/drivers/media/dvb/dvb-core/dvb_net.c
1427 @@ -350,6 +350,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
1428 const u8 *ts, *ts_end, *from_where = NULL;
1429 u8 ts_remain = 0, how_much = 0, new_ts = 1;
1430 struct ethhdr *ethh = NULL;
1431 + bool error = false;
1432
1433 #ifdef ULE_DEBUG
1434 /* The code inside ULE_DEBUG keeps a history of the last 100 TS cells processed. */
1435 @@ -459,10 +460,16 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
1436
1437 /* Drop partly decoded SNDU, reset state, resync on PUSI. */
1438 if (priv->ule_skb) {
1439 - dev_kfree_skb( priv->ule_skb );
1440 + error = true;
1441 + dev_kfree_skb(priv->ule_skb);
1442 + }
1443 +
1444 + if (error || priv->ule_sndu_remain) {
1445 dev->stats.rx_errors++;
1446 dev->stats.rx_frame_errors++;
1447 + error = false;
1448 }
1449 +
1450 reset_ule(priv);
1451 priv->need_pusi = 1;
1452 continue;
1453 @@ -534,6 +541,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
1454 from_where += 2;
1455 }
1456
1457 + priv->ule_sndu_remain = priv->ule_sndu_len + 2;
1458 /*
1459 * State of current TS:
1460 * ts_remain (remaining bytes in the current TS cell)
1461 @@ -543,6 +551,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
1462 */
1463 switch (ts_remain) {
1464 case 1:
1465 + priv->ule_sndu_remain--;
1466 priv->ule_sndu_type = from_where[0] << 8;
1467 priv->ule_sndu_type_1 = 1; /* first byte of ule_type is set. */
1468 ts_remain -= 1; from_where += 1;
1469 @@ -556,6 +565,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
1470 default: /* complete ULE header is present in current TS. */
1471 /* Extract ULE type field. */
1472 if (priv->ule_sndu_type_1) {
1473 + priv->ule_sndu_type_1 = 0;
1474 priv->ule_sndu_type |= from_where[0];
1475 from_where += 1; /* points to payload start. */
1476 ts_remain -= 1;
1477 diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
1478 index d8d4214..32a7ec6 100644
1479 --- a/drivers/media/dvb/ttpci/Kconfig
1480 +++ b/drivers/media/dvb/ttpci/Kconfig
1481 @@ -68,13 +68,14 @@ config DVB_BUDGET
1482 select DVB_VES1820 if !DVB_FE_CUSTOMISE
1483 select DVB_L64781 if !DVB_FE_CUSTOMISE
1484 select DVB_TDA8083 if !DVB_FE_CUSTOMISE
1485 - select DVB_TDA10021 if !DVB_FE_CUSTOMISE
1486 - select DVB_TDA10023 if !DVB_FE_CUSTOMISE
1487 select DVB_S5H1420 if !DVB_FE_CUSTOMISE
1488 select DVB_TDA10086 if !DVB_FE_CUSTOMISE
1489 select DVB_TDA826X if !DVB_FE_CUSTOMISE
1490 select DVB_LNBP21 if !DVB_FE_CUSTOMISE
1491 select DVB_TDA1004X if !DVB_FE_CUSTOMISE
1492 + select DVB_ISL6423 if !DVB_FE_CUSTOMISE
1493 + select DVB_STV090x if !DVB_FE_CUSTOMISE
1494 + select DVB_STV6110x if !DVB_FE_CUSTOMISE
1495 help
1496 Support for simple SAA7146 based DVB cards (so called Budget-
1497 or Nova-PCI cards) without onboard MPEG2 decoder, and without
1498 diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c
1499 index 4172cb3..d4746e0 100644
1500 --- a/drivers/media/video/cx23885/cx23885-i2c.c
1501 +++ b/drivers/media/video/cx23885/cx23885-i2c.c
1502 @@ -365,7 +365,17 @@ int cx23885_i2c_register(struct cx23885_i2c *bus)
1503
1504 memset(&info, 0, sizeof(struct i2c_board_info));
1505 strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
1506 - i2c_new_probed_device(&bus->i2c_adap, &info, addr_list);
1507 + /*
1508 + * We can't call i2c_new_probed_device() because it uses
1509 + * quick writes for probing and the IR receiver device only
1510 + * replies to reads.
1511 + */
1512 + if (i2c_smbus_xfer(&bus->i2c_adap, addr_list[0], 0,
1513 + I2C_SMBUS_READ, 0, I2C_SMBUS_QUICK,
1514 + NULL) >= 0) {
1515 + info.addr = addr_list[0];
1516 + i2c_new_device(&bus->i2c_adap, &info);
1517 + }
1518 }
1519
1520 return bus->i2c_rc;
1521 diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
1522 index ee1ca39..fb39f11 100644
1523 --- a/drivers/media/video/cx88/cx88-i2c.c
1524 +++ b/drivers/media/video/cx88/cx88-i2c.c
1525 @@ -188,10 +188,24 @@ int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
1526 0x18, 0x6b, 0x71,
1527 I2C_CLIENT_END
1528 };
1529 + const unsigned short *addrp;
1530
1531 memset(&info, 0, sizeof(struct i2c_board_info));
1532 strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
1533 - i2c_new_probed_device(&core->i2c_adap, &info, addr_list);
1534 + /*
1535 + * We can't call i2c_new_probed_device() because it uses
1536 + * quick writes for probing and at least some R receiver
1537 + * devices only reply to reads.
1538 + */
1539 + for (addrp = addr_list; *addrp != I2C_CLIENT_END; addrp++) {
1540 + if (i2c_smbus_xfer(&core->i2c_adap, *addrp, 0,
1541 + I2C_SMBUS_READ, 0,
1542 + I2C_SMBUS_QUICK, NULL) >= 0) {
1543 + info.addr = *addrp;
1544 + i2c_new_device(&core->i2c_adap, &info);
1545 + break;
1546 + }
1547 + }
1548 }
1549 return core->i2c_rc;
1550 }
1551 diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
1552 index 8756be5..450ac70 100644
1553 --- a/drivers/media/video/uvc/uvc_driver.c
1554 +++ b/drivers/media/video/uvc/uvc_driver.c
1555 @@ -58,6 +58,11 @@ static struct uvc_format_desc uvc_fmts[] = {
1556 .fcc = V4L2_PIX_FMT_YUYV,
1557 },
1558 {
1559 + .name = "YUV 4:2:2 (YUYV)",
1560 + .guid = UVC_GUID_FORMAT_YUY2_ISIGHT,
1561 + .fcc = V4L2_PIX_FMT_YUYV,
1562 + },
1563 + {
1564 .name = "YUV 4:2:0 (NV12)",
1565 .guid = UVC_GUID_FORMAT_NV12,
1566 .fcc = V4L2_PIX_FMT_NV12,
1567 @@ -83,11 +88,16 @@ static struct uvc_format_desc uvc_fmts[] = {
1568 .fcc = V4L2_PIX_FMT_UYVY,
1569 },
1570 {
1571 - .name = "Greyscale",
1572 + .name = "Greyscale (8-bit)",
1573 .guid = UVC_GUID_FORMAT_Y800,
1574 .fcc = V4L2_PIX_FMT_GREY,
1575 },
1576 {
1577 + .name = "Greyscale (16-bit)",
1578 + .guid = UVC_GUID_FORMAT_Y16,
1579 + .fcc = V4L2_PIX_FMT_Y16,
1580 + },
1581 + {
1582 .name = "RGB Bayer",
1583 .guid = UVC_GUID_FORMAT_BY8,
1584 .fcc = V4L2_PIX_FMT_SBGGR8,
1585 @@ -2048,6 +2058,15 @@ static struct usb_device_id uvc_ids[] = {
1586 .bInterfaceSubClass = 1,
1587 .bInterfaceProtocol = 0,
1588 .driver_info = UVC_QUIRK_STREAM_NO_FID },
1589 + /* Syntek (Packard Bell EasyNote MX52 */
1590 + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1591 + | USB_DEVICE_ID_MATCH_INT_INFO,
1592 + .idVendor = 0x174f,
1593 + .idProduct = 0x8a12,
1594 + .bInterfaceClass = USB_CLASS_VIDEO,
1595 + .bInterfaceSubClass = 1,
1596 + .bInterfaceProtocol = 0,
1597 + .driver_info = UVC_QUIRK_STREAM_NO_FID },
1598 /* Syntek (Asus F9SG) */
1599 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1600 | USB_DEVICE_ID_MATCH_INT_INFO,
1601 @@ -2112,6 +2131,15 @@ static struct usb_device_id uvc_ids[] = {
1602 .bInterfaceSubClass = 1,
1603 .bInterfaceProtocol = 0,
1604 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1605 + /* Arkmicro unbranded */
1606 + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1607 + | USB_DEVICE_ID_MATCH_INT_INFO,
1608 + .idVendor = 0x18ec,
1609 + .idProduct = 0x3290,
1610 + .bInterfaceClass = USB_CLASS_VIDEO,
1611 + .bInterfaceSubClass = 1,
1612 + .bInterfaceProtocol = 0,
1613 + .driver_info = UVC_QUIRK_PROBE_DEF },
1614 /* Bodelin ProScopeHR */
1615 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1616 | USB_DEVICE_ID_MATCH_DEV_HI
1617 diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
1618 index e7958aa..64007b9 100644
1619 --- a/drivers/media/video/uvc/uvcvideo.h
1620 +++ b/drivers/media/video/uvc/uvcvideo.h
1621 @@ -112,6 +112,9 @@ struct uvc_xu_control {
1622 #define UVC_GUID_FORMAT_YUY2 \
1623 { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \
1624 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
1625 +#define UVC_GUID_FORMAT_YUY2_ISIGHT \
1626 + { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \
1627 + 0x80, 0x00, 0x00, 0x00, 0x00, 0x38, 0x9b, 0x71}
1628 #define UVC_GUID_FORMAT_NV12 \
1629 { 'N', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \
1630 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
1631 @@ -127,11 +130,13 @@ struct uvc_xu_control {
1632 #define UVC_GUID_FORMAT_Y800 \
1633 { 'Y', '8', '0', '0', 0x00, 0x00, 0x10, 0x00, \
1634 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
1635 +#define UVC_GUID_FORMAT_Y16 \
1636 + { 'Y', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
1637 + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
1638 #define UVC_GUID_FORMAT_BY8 \
1639 { 'B', 'Y', '8', ' ', 0x00, 0x00, 0x10, 0x00, \
1640 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
1641
1642 -
1643 /* ------------------------------------------------------------------------
1644 * Driver specific constants.
1645 */
1646 diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
1647 index 50997d2..676cd0c 100644
1648 --- a/drivers/mmc/host/sdhci-s3c.c
1649 +++ b/drivers/mmc/host/sdhci-s3c.c
1650 @@ -372,6 +372,26 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
1651
1652 static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
1653 {
1654 + struct sdhci_host *host = platform_get_drvdata(pdev);
1655 + struct sdhci_s3c *sc = sdhci_priv(host);
1656 + int ptr;
1657 +
1658 + sdhci_remove_host(host, 1);
1659 +
1660 + for (ptr = 0; ptr < 3; ptr++) {
1661 + clk_disable(sc->clk_bus[ptr]);
1662 + clk_put(sc->clk_bus[ptr]);
1663 + }
1664 + clk_disable(sc->clk_io);
1665 + clk_put(sc->clk_io);
1666 +
1667 + iounmap(host->ioaddr);
1668 + release_resource(sc->ioarea);
1669 + kfree(sc->ioarea);
1670 +
1671 + sdhci_free_host(host);
1672 + platform_set_drvdata(pdev, NULL);
1673 +
1674 return 0;
1675 }
1676
1677 diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
1678 index 61f9da2..1cace00 100644
1679 --- a/drivers/net/cpmac.c
1680 +++ b/drivers/net/cpmac.c
1681 @@ -1176,7 +1176,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1682 if (netif_msg_drv(priv))
1683 printk(KERN_ERR "%s: Could not attach to PHY\n",
1684 dev->name);
1685 - return PTR_ERR(priv->phy);
1686 + rc = PTR_ERR(priv->phy);
1687 + goto fail;
1688 }
1689
1690 if ((rc = register_netdev(dev))) {
1691 diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
1692 index 5248f9e..35cd367 100644
1693 --- a/drivers/net/cxgb3/ael1002.c
1694 +++ b/drivers/net/cxgb3/ael1002.c
1695 @@ -934,7 +934,7 @@ static struct cphy_ops xaui_direct_ops = {
1696 int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
1697 int phy_addr, const struct mdio_ops *mdio_ops)
1698 {
1699 - cphy_init(phy, adapter, MDIO_PRTAD_NONE, &xaui_direct_ops, mdio_ops,
1700 + cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
1701 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
1702 "10GBASE-CX4");
1703 return 0;
1704 diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
1705 index 31b8bef..3f5eb81 100644
1706 --- a/drivers/net/dm9000.c
1707 +++ b/drivers/net/dm9000.c
1708 @@ -471,17 +471,13 @@ static uint32_t dm9000_get_rx_csum(struct net_device *dev)
1709 return dm->rx_csum;
1710 }
1711
1712 -static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
1713 +static int dm9000_set_rx_csum_unlocked(struct net_device *dev, uint32_t data)
1714 {
1715 board_info_t *dm = to_dm9000_board(dev);
1716 - unsigned long flags;
1717
1718 if (dm->can_csum) {
1719 dm->rx_csum = data;
1720 -
1721 - spin_lock_irqsave(&dm->lock, flags);
1722 iow(dm, DM9000_RCSR, dm->rx_csum ? RCSR_CSUM : 0);
1723 - spin_unlock_irqrestore(&dm->lock, flags);
1724
1725 return 0;
1726 }
1727 @@ -489,6 +485,19 @@ static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
1728 return -EOPNOTSUPP;
1729 }
1730
1731 +static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
1732 +{
1733 + board_info_t *dm = to_dm9000_board(dev);
1734 + unsigned long flags;
1735 + int ret;
1736 +
1737 + spin_lock_irqsave(&dm->lock, flags);
1738 + ret = dm9000_set_rx_csum_unlocked(dev, data);
1739 + spin_unlock_irqrestore(&dm->lock, flags);
1740 +
1741 + return ret;
1742 +}
1743 +
1744 static int dm9000_set_tx_csum(struct net_device *dev, uint32_t data)
1745 {
1746 board_info_t *dm = to_dm9000_board(dev);
1747 @@ -667,7 +676,7 @@ static unsigned char dm9000_type_to_char(enum dm9000_type type)
1748 * Set DM9000 multicast address
1749 */
1750 static void
1751 -dm9000_hash_table(struct net_device *dev)
1752 +dm9000_hash_table_unlocked(struct net_device *dev)
1753 {
1754 board_info_t *db = netdev_priv(dev);
1755 struct dev_mc_list *mcptr = dev->mc_list;
1756 @@ -676,12 +685,9 @@ dm9000_hash_table(struct net_device *dev)
1757 u32 hash_val;
1758 u16 hash_table[4];
1759 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
1760 - unsigned long flags;
1761
1762 dm9000_dbg(db, 1, "entering %s\n", __func__);
1763
1764 - spin_lock_irqsave(&db->lock, flags);
1765 -
1766 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
1767 iow(db, oft, dev->dev_addr[i]);
1768
1769 @@ -711,6 +717,16 @@ dm9000_hash_table(struct net_device *dev)
1770 }
1771
1772 iow(db, DM9000_RCR, rcr);
1773 +}
1774 +
1775 +static void
1776 +dm9000_hash_table(struct net_device *dev)
1777 +{
1778 + board_info_t *db = netdev_priv(dev);
1779 + unsigned long flags;
1780 +
1781 + spin_lock_irqsave(&db->lock, flags);
1782 + dm9000_hash_table_unlocked(dev);
1783 spin_unlock_irqrestore(&db->lock, flags);
1784 }
1785
1786 @@ -729,7 +745,7 @@ dm9000_init_dm9000(struct net_device *dev)
1787 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
1788
1789 /* Checksum mode */
1790 - dm9000_set_rx_csum(dev, db->rx_csum);
1791 + dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
1792
1793 /* GPIO0 on pre-activate PHY */
1794 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
1795 @@ -749,7 +765,7 @@ dm9000_init_dm9000(struct net_device *dev)
1796 iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
1797
1798 /* Set address filter table */
1799 - dm9000_hash_table(dev);
1800 + dm9000_hash_table_unlocked(dev);
1801
1802 imr = IMR_PAR | IMR_PTM | IMR_PRM;
1803 if (db->type != TYPE_DM9000E)
1804 diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
1805 index 3116601..7cd446d 100644
1806 --- a/drivers/net/forcedeth.c
1807 +++ b/drivers/net/forcedeth.c
1808 @@ -5900,7 +5900,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
1809 /* Limit the number of tx's outstanding for hw bug */
1810 if (id->driver_data & DEV_NEED_TX_LIMIT) {
1811 np->tx_limit = 1;
1812 - if ((id->driver_data & DEV_NEED_TX_LIMIT2) &&
1813 + if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
1814 pci_dev->revision >= 0xA2)
1815 np->tx_limit = 0;
1816 }
1817 diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
1818 index c6d97eb..33352ff 100644
1819 --- a/drivers/net/igb/e1000_82575.c
1820 +++ b/drivers/net/igb/e1000_82575.c
1821 @@ -1168,9 +1168,18 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1822 {
1823 s32 ret_val = 0;
1824
1825 - if (igb_check_alt_mac_addr(hw))
1826 - ret_val = igb_read_mac_addr(hw);
1827 + /*
1828 + * If there's an alternate MAC address place it in RAR0
1829 + * so that it will override the Si installed default perm
1830 + * address.
1831 + */
1832 + ret_val = igb_check_alt_mac_addr(hw);
1833 + if (ret_val)
1834 + goto out;
1835 +
1836 + ret_val = igb_read_mac_addr(hw);
1837
1838 +out:
1839 return ret_val;
1840 }
1841
1842 diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
1843 index 1a23aeb..72081df 100644
1844 --- a/drivers/net/igb/e1000_hw.h
1845 +++ b/drivers/net/igb/e1000_hw.h
1846 @@ -53,6 +53,8 @@ struct e1000_hw;
1847
1848 #define E1000_FUNC_1 1
1849
1850 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
1851 +
1852 enum e1000_mac_type {
1853 e1000_undefined = 0,
1854 e1000_82575,
1855 diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
1856 index 7d76bb0..d4fa82c 100644
1857 --- a/drivers/net/igb/e1000_mac.c
1858 +++ b/drivers/net/igb/e1000_mac.c
1859 @@ -185,13 +185,12 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
1860 }
1861
1862 if (nvm_alt_mac_addr_offset == 0xFFFF) {
1863 - ret_val = -(E1000_NOT_IMPLEMENTED);
1864 + /* There is no Alternate MAC Address */
1865 goto out;
1866 }
1867
1868 if (hw->bus.func == E1000_FUNC_1)
1869 - nvm_alt_mac_addr_offset += ETH_ALEN/sizeof(u16);
1870 -
1871 + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
1872 for (i = 0; i < ETH_ALEN; i += 2) {
1873 offset = nvm_alt_mac_addr_offset + (i >> 1);
1874 ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
1875 @@ -206,14 +205,16 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
1876
1877 /* if multicast bit is set, the alternate address will not be used */
1878 if (alt_mac_addr[0] & 0x01) {
1879 - ret_val = -(E1000_NOT_IMPLEMENTED);
1880 + hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
1881 goto out;
1882 }
1883
1884 - for (i = 0; i < ETH_ALEN; i++)
1885 - hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i];
1886 -
1887 - hw->mac.ops.rar_set(hw, hw->mac.perm_addr, 0);
1888 + /*
1889 + * We have a valid alternate MAC address, and we want to treat it the
1890 + * same as the normal permanent MAC address stored by the HW into the
1891 + * RAR. Do this by mapping this address into RAR0.
1892 + */
1893 + hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
1894
1895 out:
1896 return ret_val;
1897 diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
1898 index 77125b5..a17aaee 100644
1899 --- a/drivers/net/sky2.c
1900 +++ b/drivers/net/sky2.c
1901 @@ -704,11 +704,24 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
1902 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1903 }
1904
1905 +/* Enable Rx/Tx */
1906 +static void sky2_enable_rx_tx(struct sky2_port *sky2)
1907 +{
1908 + struct sky2_hw *hw = sky2->hw;
1909 + unsigned port = sky2->port;
1910 + u16 reg;
1911 +
1912 + reg = gma_read16(hw, port, GM_GP_CTRL);
1913 + reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1914 + gma_write16(hw, port, GM_GP_CTRL, reg);
1915 +}
1916 +
1917 /* Force a renegotiation */
1918 static void sky2_phy_reinit(struct sky2_port *sky2)
1919 {
1920 spin_lock_bh(&sky2->phy_lock);
1921 sky2_phy_init(sky2->hw, sky2->port);
1922 + sky2_enable_rx_tx(sky2);
1923 spin_unlock_bh(&sky2->phy_lock);
1924 }
1925
1926 @@ -1929,7 +1942,6 @@ static void sky2_link_up(struct sky2_port *sky2)
1927 {
1928 struct sky2_hw *hw = sky2->hw;
1929 unsigned port = sky2->port;
1930 - u16 reg;
1931 static const char *fc_name[] = {
1932 [FC_NONE] = "none",
1933 [FC_TX] = "tx",
1934 @@ -1937,10 +1949,7 @@ static void sky2_link_up(struct sky2_port *sky2)
1935 [FC_BOTH] = "both",
1936 };
1937
1938 - /* enable Rx/Tx */
1939 - reg = gma_read16(hw, port, GM_GP_CTRL);
1940 - reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1941 - gma_write16(hw, port, GM_GP_CTRL, reg);
1942 + sky2_enable_rx_tx(sky2);
1943
1944 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1945
1946 diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
1947 index 71a1bd25..88663df 100644
1948 --- a/drivers/net/wireless/ath/ath5k/attach.c
1949 +++ b/drivers/net/wireless/ath/ath5k/attach.c
1950 @@ -133,6 +133,7 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc)
1951 ah->ah_cw_min = AR5K_TUNE_CWMIN;
1952 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
1953 ah->ah_software_retry = false;
1954 + ah->ah_current_channel = &sc->channels[0];
1955
1956 /*
1957 * Find the mac version
1958 diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
1959 index cefbfb9..7ee61d4 100644
1960 --- a/drivers/net/wireless/ath/ath5k/base.c
1961 +++ b/drivers/net/wireless/ath/ath5k/base.c
1962 @@ -1818,11 +1818,6 @@ ath5k_tasklet_rx(unsigned long data)
1963 return;
1964 }
1965
1966 - if (unlikely(rs.rs_more)) {
1967 - ATH5K_WARN(sc, "unsupported jumbo\n");
1968 - goto next;
1969 - }
1970 -
1971 if (unlikely(rs.rs_status)) {
1972 if (rs.rs_status & AR5K_RXERR_PHY)
1973 goto next;
1974 @@ -1852,6 +1847,8 @@ ath5k_tasklet_rx(unsigned long data)
1975 sc->opmode != NL80211_IFTYPE_MONITOR)
1976 goto next;
1977 }
1978 + if (unlikely(rs.rs_more))
1979 + goto next;
1980 accept:
1981 next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
1982
1983 diff --git a/drivers/net/wireless/ath/ath9k/initvals.h b/drivers/net/wireless/ath/ath9k/initvals.h
1984 index 8622265..a21c214 100644
1985 --- a/drivers/net/wireless/ath/ath9k/initvals.h
1986 +++ b/drivers/net/wireless/ath/ath9k/initvals.h
1987 @@ -2762,7 +2762,7 @@ static const u32 ar9280Common_9280_2[][2] = {
1988 { 0x00008258, 0x00000000 },
1989 { 0x0000825c, 0x400000ff },
1990 { 0x00008260, 0x00080922 },
1991 - { 0x00008264, 0xa8a00010 },
1992 + { 0x00008264, 0x88a00010 },
1993 { 0x00008270, 0x00000000 },
1994 { 0x00008274, 0x40000000 },
1995 { 0x00008278, 0x003e4180 },
1996 @@ -3935,7 +3935,7 @@ static const u_int32_t ar9285Common_9285[][2] = {
1997 { 0x00008258, 0x00000000 },
1998 { 0x0000825c, 0x400000ff },
1999 { 0x00008260, 0x00080922 },
2000 - { 0x00008264, 0xa8a00010 },
2001 + { 0x00008264, 0x88a00010 },
2002 { 0x00008270, 0x00000000 },
2003 { 0x00008274, 0x40000000 },
2004 { 0x00008278, 0x003e4180 },
2005 @@ -5072,7 +5072,7 @@ static const u_int32_t ar9287Common_9287_1_0[][2] = {
2006 { 0x00008258, 0x00000000 },
2007 { 0x0000825c, 0x400000ff },
2008 { 0x00008260, 0x00080922 },
2009 - { 0x00008264, 0xa8a00010 },
2010 + { 0x00008264, 0x88a00010 },
2011 { 0x00008270, 0x00000000 },
2012 { 0x00008274, 0x40000000 },
2013 { 0x00008278, 0x003e4180 },
2014 @@ -6864,7 +6864,7 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
2015 { 0x00008258, 0x00000000 },
2016 { 0x0000825c, 0x400000ff },
2017 { 0x00008260, 0x00080922 },
2018 - { 0x00008264, 0xa8a00010 },
2019 + { 0x00008264, 0x88a00010 },
2020 { 0x00008270, 0x00000000 },
2021 { 0x00008274, 0x40000000 },
2022 { 0x00008278, 0x003e4180 },
2023 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
2024 index 087b7e5..0c349ce 100644
2025 --- a/drivers/net/wireless/ath/ath9k/main.c
2026 +++ b/drivers/net/wireless/ath/ath9k/main.c
2027 @@ -1538,6 +1538,8 @@ bad_no_ah:
2028
2029 void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
2030 {
2031 + struct ath_hw *ah = sc->sc_ah;
2032 +
2033 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
2034 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2035 IEEE80211_HW_SIGNAL_DBM |
2036 @@ -1558,7 +1560,10 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
2037 BIT(NL80211_IFTYPE_ADHOC) |
2038 BIT(NL80211_IFTYPE_MESH_POINT);
2039
2040 - hw->wiphy->ps_default = false;
2041 + if (AR_SREV_5416(ah))
2042 + hw->wiphy->ps_default = false;
2043 + else
2044 + hw->wiphy->ps_default = true;
2045
2046 hw->queues = 4;
2047 hw->max_rates = 4;
2048 diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
2049 index ad8eab4..b4ff1dc 100644
2050 --- a/drivers/net/wireless/hostap/hostap_cs.c
2051 +++ b/drivers/net/wireless/hostap/hostap_cs.c
2052 @@ -626,6 +626,7 @@ static int prism2_config(struct pcmcia_device *link)
2053 int ret = 1;
2054 int last_fn, last_ret;
2055 struct hostap_cs_priv *hw_priv;
2056 + unsigned long flags;
2057
2058 PDEBUG(DEBUG_FLOW, "prism2_config()\n");
2059
2060 @@ -661,6 +662,12 @@ static int prism2_config(struct pcmcia_device *link)
2061 link->dev_node = &hw_priv->node;
2062
2063 /*
2064 + * Make sure the IRQ handler cannot proceed until at least
2065 + * dev->base_addr is initialized.
2066 + */
2067 + spin_lock_irqsave(&local->irq_init_lock, flags);
2068 +
2069 + /*
2070 * Allocate an interrupt line. Note that this does not assign a
2071 * handler to the interrupt, unless the 'Handler' member of the
2072 * irq structure is initialized.
2073 @@ -686,6 +693,8 @@ static int prism2_config(struct pcmcia_device *link)
2074 dev->irq = link->irq.AssignedIRQ;
2075 dev->base_addr = link->io.BasePort1;
2076
2077 + spin_unlock_irqrestore(&local->irq_init_lock, flags);
2078 +
2079 /* Finally, report what we've done */
2080 printk(KERN_INFO "%s: index 0x%02x: ",
2081 dev_info, link->conf.ConfigIndex);
2082 @@ -715,6 +724,7 @@ static int prism2_config(struct pcmcia_device *link)
2083 return ret;
2084
2085 cs_failed:
2086 + spin_unlock_irqrestore(&local->irq_init_lock, flags);
2087 cs_error(link, last_fn, last_ret);
2088
2089 failed:
2090 diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
2091 index ff9b5c8..2f999fc 100644
2092 --- a/drivers/net/wireless/hostap/hostap_hw.c
2093 +++ b/drivers/net/wireless/hostap/hostap_hw.c
2094 @@ -2621,6 +2621,18 @@ static irqreturn_t prism2_interrupt(int irq, void *dev_id)
2095 iface = netdev_priv(dev);
2096 local = iface->local;
2097
2098 + /* Detect early interrupt before driver is fully configued */
2099 + spin_lock(&local->irq_init_lock);
2100 + if (!dev->base_addr) {
2101 + if (net_ratelimit()) {
2102 + printk(KERN_DEBUG "%s: Interrupt, but dev not configured\n",
2103 + dev->name);
2104 + }
2105 + spin_unlock(&local->irq_init_lock);
2106 + return IRQ_HANDLED;
2107 + }
2108 + spin_unlock(&local->irq_init_lock);
2109 +
2110 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INTERRUPT, 0, 0);
2111
2112 if (local->func->card_present && !local->func->card_present(local)) {
2113 @@ -3138,6 +3150,7 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
2114 spin_lock_init(&local->cmdlock);
2115 spin_lock_init(&local->baplock);
2116 spin_lock_init(&local->lock);
2117 + spin_lock_init(&local->irq_init_lock);
2118 mutex_init(&local->rid_bap_mtx);
2119
2120 if (card_idx < 0 || card_idx >= MAX_PARM_DEVICES)
2121 diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
2122 index 3d23891..1ba33be 100644
2123 --- a/drivers/net/wireless/hostap/hostap_wlan.h
2124 +++ b/drivers/net/wireless/hostap/hostap_wlan.h
2125 @@ -654,7 +654,7 @@ struct local_info {
2126 rwlock_t iface_lock; /* hostap_interfaces read lock; use write lock
2127 * when removing entries from the list.
2128 * TX and RX paths can use read lock. */
2129 - spinlock_t cmdlock, baplock, lock;
2130 + spinlock_t cmdlock, baplock, lock, irq_init_lock;
2131 struct mutex rid_bap_mtx;
2132 u16 infofid; /* MAC buffer id for info frame */
2133 /* txfid, intransmitfid, next_txtid, and next_alloc are protected by
2134 diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
2135 index 71c0ad4..db2946e 100644
2136 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c
2137 +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
2138 @@ -799,6 +799,7 @@ void iwl_bg_abort_scan(struct work_struct *work)
2139
2140 mutex_lock(&priv->mutex);
2141
2142 + cancel_delayed_work_sync(&priv->scan_check);
2143 set_bit(STATUS_SCAN_ABORTING, &priv->status);
2144 iwl_send_scan_abort(priv);
2145
2146 diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
2147 index 7f15b7e..d21c06e 100644
2148 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c
2149 +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
2150 @@ -1479,6 +1479,11 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
2151 sta_id = ba_resp->sta_id;
2152 tid = ba_resp->tid;
2153 agg = &priv->stations[sta_id].tid[tid].agg;
2154 + if (unlikely(agg->txq_id != scd_flow)) {
2155 + IWL_ERR(priv, "BA scd_flow %d does not match txq_id %d\n",
2156 + scd_flow, agg->txq_id);
2157 + return;
2158 + }
2159
2160 /* Find index just before block-ack window */
2161 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
2162 diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
2163 index 485a8d4..f876d02 100644
2164 --- a/drivers/net/wireless/libertas/if_sdio.c
2165 +++ b/drivers/net/wireless/libertas/if_sdio.c
2166 @@ -34,6 +34,8 @@
2167 #include <linux/mmc/card.h>
2168 #include <linux/mmc/sdio_func.h>
2169 #include <linux/mmc/sdio_ids.h>
2170 +#include <linux/mmc/sdio.h>
2171 +#include <linux/mmc/host.h>
2172
2173 #include "host.h"
2174 #include "decl.h"
2175 @@ -883,6 +885,7 @@ static int if_sdio_probe(struct sdio_func *func,
2176 int ret, i;
2177 unsigned int model;
2178 struct if_sdio_packet *packet;
2179 + struct mmc_host *host = func->card->host;
2180
2181 lbs_deb_enter(LBS_DEB_SDIO);
2182
2183 @@ -963,6 +966,25 @@ static int if_sdio_probe(struct sdio_func *func,
2184 if (ret)
2185 goto disable;
2186
2187 + /* For 1-bit transfers to the 8686 model, we need to enable the
2188 + * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
2189 + * bit to allow access to non-vendor registers. */
2190 + if ((card->model == IF_SDIO_MODEL_8686) &&
2191 + (host->caps & MMC_CAP_SDIO_IRQ) &&
2192 + (host->ios.bus_width == MMC_BUS_WIDTH_1)) {
2193 + u8 reg;
2194 +
2195 + func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
2196 + reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret);
2197 + if (ret)
2198 + goto release_int;
2199 +
2200 + reg |= SDIO_BUS_ECSI;
2201 + sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret);
2202 + if (ret)
2203 + goto release_int;
2204 + }
2205 +
2206 card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret);
2207 if (ret)
2208 goto release_int;
2209 diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
2210 index 7bafa83..3df13f5 100644
2211 --- a/drivers/net/wireless/p54/p54pci.c
2212 +++ b/drivers/net/wireless/p54/p54pci.c
2213 @@ -40,6 +40,8 @@ static struct pci_device_id p54p_table[] __devinitdata = {
2214 { PCI_DEVICE(0x1260, 0x3877) },
2215 /* Intersil PRISM Javelin/Xbow Wireless LAN adapter */
2216 { PCI_DEVICE(0x1260, 0x3886) },
2217 + /* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
2218 + { PCI_DEVICE(0x1260, 0xffff) },
2219 { },
2220 };
2221
2222 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2223 index 595d03a..812d4ac 100644
2224 --- a/drivers/pci/pci.c
2225 +++ b/drivers/pci/pci.c
2226 @@ -2046,6 +2046,7 @@ void pci_msi_off(struct pci_dev *dev)
2227 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
2228 }
2229 }
2230 +EXPORT_SYMBOL_GPL(pci_msi_off);
2231
2232 #ifndef HAVE_ARCH_PCI_SET_DMA_MASK
2233 /*
2234 diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
2235 index 329093e..c533b1c 100644
2236 --- a/drivers/platform/x86/eeepc-laptop.c
2237 +++ b/drivers/platform/x86/eeepc-laptop.c
2238 @@ -752,6 +752,8 @@ static void eeepc_rfkill_hotplug(void)
2239 struct pci_dev *dev;
2240 struct pci_bus *bus;
2241 bool blocked = eeepc_wlan_rfkill_blocked();
2242 + bool absent;
2243 + u32 l;
2244
2245 if (ehotk->wlan_rfkill)
2246 rfkill_set_sw_state(ehotk->wlan_rfkill, blocked);
2247 @@ -765,6 +767,22 @@ static void eeepc_rfkill_hotplug(void)
2248 goto out_unlock;
2249 }
2250
2251 + if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
2252 + pr_err("Unable to read PCI config space?\n");
2253 + goto out_unlock;
2254 + }
2255 + absent = (l == 0xffffffff);
2256 +
2257 + if (blocked != absent) {
2258 + pr_warning("BIOS says wireless lan is %s, "
2259 + "but the pci device is %s\n",
2260 + blocked ? "blocked" : "unblocked",
2261 + absent ? "absent" : "present");
2262 + pr_warning("skipped wireless hotplug as probably "
2263 + "inappropriate for this model\n");
2264 + goto out_unlock;
2265 + }
2266 +
2267 if (!blocked) {
2268 dev = pci_get_slot(bus, 0);
2269 if (dev) {
2270 diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
2271 index eb99ee4..861d91d 100644
2272 --- a/drivers/rtc/rtc-ds1307.c
2273 +++ b/drivers/rtc/rtc-ds1307.c
2274 @@ -775,7 +775,7 @@ static int __devinit ds1307_probe(struct i2c_client *client,
2275
2276 read_rtc:
2277 /* read RTC registers */
2278 - tmp = ds1307->read_block_data(ds1307->client, 0, 8, buf);
2279 + tmp = ds1307->read_block_data(ds1307->client, ds1307->offset, 8, buf);
2280 if (tmp != 8) {
2281 pr_debug("read error %d\n", tmp);
2282 err = -EIO;
2283 @@ -860,7 +860,7 @@ read_rtc:
2284 if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
2285 tmp += 12;
2286 i2c_smbus_write_byte_data(client,
2287 - DS1307_REG_HOUR,
2288 + ds1307->offset + DS1307_REG_HOUR,
2289 bin2bcd(tmp));
2290 }
2291
2292 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
2293 index 0391d75..a5b8e7b 100644
2294 --- a/drivers/scsi/aacraid/commctrl.c
2295 +++ b/drivers/scsi/aacraid/commctrl.c
2296 @@ -655,9 +655,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
2297 /* Does this really need to be GFP_DMA? */
2298 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
2299 if(!p) {
2300 - kfree (usg);
2301 - dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
2302 + dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
2303 usg->sg[i].count,i,usg->count));
2304 + kfree(usg);
2305 rcode = -ENOMEM;
2306 goto cleanup;
2307 }
2308 diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
2309 index 300cea7..7feb902 100644
2310 --- a/drivers/serial/cpm_uart/cpm_uart_core.c
2311 +++ b/drivers/serial/cpm_uart/cpm_uart_core.c
2312 @@ -930,6 +930,83 @@ static void cpm_uart_config_port(struct uart_port *port, int flags)
2313 }
2314 }
2315
2316 +#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_CPM_CONSOLE)
2317 +/*
2318 + * Write a string to the serial port
2319 + * Note that this is called with interrupts already disabled
2320 + */
2321 +static void cpm_uart_early_write(struct uart_cpm_port *pinfo,
2322 + const char *string, u_int count)
2323 +{
2324 + unsigned int i;
2325 + cbd_t __iomem *bdp, *bdbase;
2326 + unsigned char *cpm_outp_addr;
2327 +
2328 + /* Get the address of the host memory buffer.
2329 + */
2330 + bdp = pinfo->tx_cur;
2331 + bdbase = pinfo->tx_bd_base;
2332 +
2333 + /*
2334 + * Now, do each character. This is not as bad as it looks
2335 + * since this is a holding FIFO and not a transmitting FIFO.
2336 + * We could add the complexity of filling the entire transmit
2337 + * buffer, but we would just wait longer between accesses......
2338 + */
2339 + for (i = 0; i < count; i++, string++) {
2340 + /* Wait for transmitter fifo to empty.
2341 + * Ready indicates output is ready, and xmt is doing
2342 + * that, not that it is ready for us to send.
2343 + */
2344 + while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
2345 + ;
2346 +
2347 + /* Send the character out.
2348 + * If the buffer address is in the CPM DPRAM, don't
2349 + * convert it.
2350 + */
2351 + cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr),
2352 + pinfo);
2353 + *cpm_outp_addr = *string;
2354 +
2355 + out_be16(&bdp->cbd_datlen, 1);
2356 + setbits16(&bdp->cbd_sc, BD_SC_READY);
2357 +
2358 + if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
2359 + bdp = bdbase;
2360 + else
2361 + bdp++;
2362 +
2363 + /* if a LF, also do CR... */
2364 + if (*string == 10) {
2365 + while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
2366 + ;
2367 +
2368 + cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr),
2369 + pinfo);
2370 + *cpm_outp_addr = 13;
2371 +
2372 + out_be16(&bdp->cbd_datlen, 1);
2373 + setbits16(&bdp->cbd_sc, BD_SC_READY);
2374 +
2375 + if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
2376 + bdp = bdbase;
2377 + else
2378 + bdp++;
2379 + }
2380 + }
2381 +
2382 + /*
2383 + * Finally, Wait for transmitter & holding register to empty
2384 + * and restore the IER
2385 + */
2386 + while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
2387 + ;
2388 +
2389 + pinfo->tx_cur = bdp;
2390 +}
2391 +#endif
2392 +
2393 #ifdef CONFIG_CONSOLE_POLL
2394 /* Serial polling routines for writing and reading from the uart while
2395 * in an interrupt or debug context.
2396 @@ -999,7 +1076,7 @@ static void cpm_put_poll_char(struct uart_port *port,
2397 static char ch[2];
2398
2399 ch[0] = (char)c;
2400 - cpm_uart_early_write(pinfo->port.line, ch, 1);
2401 + cpm_uart_early_write(pinfo, ch, 1);
2402 }
2403 #endif /* CONFIG_CONSOLE_POLL */
2404
2405 @@ -1130,9 +1207,6 @@ static void cpm_uart_console_write(struct console *co, const char *s,
2406 u_int count)
2407 {
2408 struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index];
2409 - unsigned int i;
2410 - cbd_t __iomem *bdp, *bdbase;
2411 - unsigned char *cp;
2412 unsigned long flags;
2413 int nolock = oops_in_progress;
2414
2415 @@ -1142,66 +1216,7 @@ static void cpm_uart_console_write(struct console *co, const char *s,
2416 spin_lock_irqsave(&pinfo->port.lock, flags);
2417 }
2418
2419 - /* Get the address of the host memory buffer.
2420 - */
2421 - bdp = pinfo->tx_cur;
2422 - bdbase = pinfo->tx_bd_base;
2423 -
2424 - /*
2425 - * Now, do each character. This is not as bad as it looks
2426 - * since this is a holding FIFO and not a transmitting FIFO.
2427 - * We could add the complexity of filling the entire transmit
2428 - * buffer, but we would just wait longer between accesses......
2429 - */
2430 - for (i = 0; i < count; i++, s++) {
2431 - /* Wait for transmitter fifo to empty.
2432 - * Ready indicates output is ready, and xmt is doing
2433 - * that, not that it is ready for us to send.
2434 - */
2435 - while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
2436 - ;
2437 -
2438 - /* Send the character out.
2439 - * If the buffer address is in the CPM DPRAM, don't
2440 - * convert it.
2441 - */
2442 - cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
2443 - *cp = *s;
2444 -
2445 - out_be16(&bdp->cbd_datlen, 1);
2446 - setbits16(&bdp->cbd_sc, BD_SC_READY);
2447 -
2448 - if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
2449 - bdp = bdbase;
2450 - else
2451 - bdp++;
2452 -
2453 - /* if a LF, also do CR... */
2454 - if (*s == 10) {
2455 - while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
2456 - ;
2457 -
2458 - cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
2459 - *cp = 13;
2460 -
2461 - out_be16(&bdp->cbd_datlen, 1);
2462 - setbits16(&bdp->cbd_sc, BD_SC_READY);
2463 -
2464 - if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
2465 - bdp = bdbase;
2466 - else
2467 - bdp++;
2468 - }
2469 - }
2470 -
2471 - /*
2472 - * Finally, Wait for transmitter & holding register to empty
2473 - * and restore the IER
2474 - */
2475 - while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
2476 - ;
2477 -
2478 - pinfo->tx_cur = bdp;
2479 + cpm_uart_early_write(pinfo, s, count);
2480
2481 if (unlikely(nolock)) {
2482 local_irq_restore(flags);
2483 diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
2484 index 9681536..bbf1cb2 100644
2485 --- a/drivers/ssb/driver_chipcommon.c
2486 +++ b/drivers/ssb/driver_chipcommon.c
2487 @@ -233,6 +233,9 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
2488 {
2489 if (!cc->dev)
2490 return; /* We don't have a ChipCommon */
2491 + if (cc->dev->id.revision >= 11)
2492 + cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
2493 + ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
2494 ssb_pmu_init(cc);
2495 chipco_powercontrol_init(cc);
2496 ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
2497 diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
2498 index 64abd11..8e194d5 100644
2499 --- a/drivers/ssb/driver_chipcommon_pmu.c
2500 +++ b/drivers/ssb/driver_chipcommon_pmu.c
2501 @@ -495,9 +495,9 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
2502 chipco_write32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, max_msk);
2503 }
2504
2505 +/* http://bcm-v4.sipsolutions.net/802.11/SSB/PmuInit */
2506 void ssb_pmu_init(struct ssb_chipcommon *cc)
2507 {
2508 - struct ssb_bus *bus = cc->dev->bus;
2509 u32 pmucap;
2510
2511 if (!(cc->capabilities & SSB_CHIPCO_CAP_PMU))
2512 @@ -509,15 +509,12 @@ void ssb_pmu_init(struct ssb_chipcommon *cc)
2513 ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n",
2514 cc->pmu.rev, pmucap);
2515
2516 - if (cc->pmu.rev >= 1) {
2517 - if ((bus->chip_id == 0x4325) && (bus->chip_rev < 2)) {
2518 - chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
2519 - ~SSB_CHIPCO_PMU_CTL_NOILPONW);
2520 - } else {
2521 - chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
2522 - SSB_CHIPCO_PMU_CTL_NOILPONW);
2523 - }
2524 - }
2525 + if (cc->pmu.rev == 1)
2526 + chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
2527 + ~SSB_CHIPCO_PMU_CTL_NOILPONW);
2528 + else
2529 + chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
2530 + SSB_CHIPCO_PMU_CTL_NOILPONW);
2531 ssb_pmu_pll_init(cc);
2532 ssb_pmu_resources_init(cc);
2533 }
2534 diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
2535 index 9e50896..17a1781 100644
2536 --- a/drivers/ssb/pci.c
2537 +++ b/drivers/ssb/pci.c
2538 @@ -22,6 +22,7 @@
2539
2540 #include "ssb_private.h"
2541
2542 +bool ssb_is_sprom_available(struct ssb_bus *bus);
2543
2544 /* Define the following to 1 to enable a printk on each coreswitch. */
2545 #define SSB_VERBOSE_PCICORESWITCH_DEBUG 0
2546 @@ -167,7 +168,7 @@ err_pci:
2547 }
2548
2549 /* Get the word-offset for a SSB_SPROM_XXX define. */
2550 -#define SPOFF(offset) (((offset) - SSB_SPROM_BASE) / sizeof(u16))
2551 +#define SPOFF(offset) ((offset) / sizeof(u16))
2552 /* Helper to extract some _offset, which is one of the SSB_SPROM_XXX defines. */
2553 #define SPEX16(_outvar, _offset, _mask, _shift) \
2554 out->_outvar = ((in[SPOFF(_offset)] & (_mask)) >> (_shift))
2555 @@ -252,8 +253,13 @@ static int sprom_do_read(struct ssb_bus *bus, u16 *sprom)
2556 {
2557 int i;
2558
2559 + /* Check if SPROM can be read */
2560 + if (ioread16(bus->mmio + bus->sprom_offset) == 0xFFFF) {
2561 + ssb_printk(KERN_ERR PFX "Unable to read SPROM\n");
2562 + return -ENODEV;
2563 + }
2564 for (i = 0; i < bus->sprom_size; i++)
2565 - sprom[i] = ioread16(bus->mmio + SSB_SPROM_BASE + (i * 2));
2566 + sprom[i] = ioread16(bus->mmio + bus->sprom_offset + (i * 2));
2567
2568 return 0;
2569 }
2570 @@ -284,7 +290,7 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
2571 ssb_printk("75%%");
2572 else if (i % 2)
2573 ssb_printk(".");
2574 - writew(sprom[i], bus->mmio + SSB_SPROM_BASE + (i * 2));
2575 + writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2));
2576 mmiowb();
2577 msleep(20);
2578 }
2579 @@ -620,21 +626,49 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
2580 int err = -ENOMEM;
2581 u16 *buf;
2582
2583 + if (!ssb_is_sprom_available(bus)) {
2584 + ssb_printk(KERN_ERR PFX "No SPROM available!\n");
2585 + return -ENODEV;
2586 + }
2587 + if (bus->chipco.dev) { /* can be unavailible! */
2588 + /*
2589 + * get SPROM offset: SSB_SPROM_BASE1 except for
2590 + * chipcommon rev >= 31 or chip ID is 0x4312 and
2591 + * chipcommon status & 3 == 2
2592 + */
2593 + if (bus->chipco.dev->id.revision >= 31)
2594 + bus->sprom_offset = SSB_SPROM_BASE31;
2595 + else if (bus->chip_id == 0x4312 &&
2596 + (bus->chipco.status & 0x03) == 2)
2597 + bus->sprom_offset = SSB_SPROM_BASE31;
2598 + else
2599 + bus->sprom_offset = SSB_SPROM_BASE1;
2600 + } else {
2601 + bus->sprom_offset = SSB_SPROM_BASE1;
2602 + }
2603 + ssb_dprintk(KERN_INFO PFX "SPROM offset is 0x%x\n", bus->sprom_offset);
2604 +
2605 buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
2606 if (!buf)
2607 goto out;
2608 bus->sprom_size = SSB_SPROMSIZE_WORDS_R123;
2609 - sprom_do_read(bus, buf);
2610 + err = sprom_do_read(bus, buf);
2611 + if (err)
2612 + goto out_free;
2613 err = sprom_check_crc(buf, bus->sprom_size);
2614 if (err) {
2615 /* try for a 440 byte SPROM - revision 4 and higher */
2616 kfree(buf);
2617 buf = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16),
2618 GFP_KERNEL);
2619 - if (!buf)
2620 + if (!buf) {
2621 + err = -ENOMEM;
2622 goto out;
2623 + }
2624 bus->sprom_size = SSB_SPROMSIZE_WORDS_R4;
2625 - sprom_do_read(bus, buf);
2626 + err = sprom_do_read(bus, buf);
2627 + if (err)
2628 + goto out_free;
2629 err = sprom_check_crc(buf, bus->sprom_size);
2630 if (err) {
2631 /* All CRC attempts failed.
2632 diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
2633 index eb70843..5f7154d 100644
2634 --- a/drivers/ssb/sprom.c
2635 +++ b/drivers/ssb/sprom.c
2636 @@ -179,3 +179,18 @@ const struct ssb_sprom *ssb_get_fallback_sprom(void)
2637 {
2638 return fallback_sprom;
2639 }
2640 +
2641 +/* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */
2642 +bool ssb_is_sprom_available(struct ssb_bus *bus)
2643 +{
2644 + /* status register only exists on chipcomon rev >= 11 and we need check
2645 + for >= 31 only */
2646 + /* this routine differs from specs as we do not access SPROM directly
2647 + on PCMCIA */
2648 + if (bus->bustype == SSB_BUSTYPE_PCI &&
2649 + bus->chipco.dev && /* can be unavailible! */
2650 + bus->chipco.dev->id.revision >= 31)
2651 + return bus->chipco.capabilities & SSB_CHIPCO_CAP_SPROM;
2652 +
2653 + return true;
2654 +}
2655 diff --git a/drivers/staging/rtl8192su/r8192U_core.c b/drivers/staging/rtl8192su/r8192U_core.c
2656 index 88644ef..6d52d6a 100644
2657 --- a/drivers/staging/rtl8192su/r8192U_core.c
2658 +++ b/drivers/staging/rtl8192su/r8192U_core.c
2659 @@ -112,12 +112,14 @@ u32 rt_global_debug_component = \
2660
2661 static struct usb_device_id rtl8192_usb_id_tbl[] = {
2662 /* Realtek */
2663 + {USB_DEVICE(0x0bda, 0x8171)},
2664 {USB_DEVICE(0x0bda, 0x8192)},
2665 {USB_DEVICE(0x0bda, 0x8709)},
2666 /* Corega */
2667 {USB_DEVICE(0x07aa, 0x0043)},
2668 /* Belkin */
2669 {USB_DEVICE(0x050d, 0x805E)},
2670 + {USB_DEVICE(0x050d, 0x815F)}, /* Belkin F5D8053 v6 */
2671 /* Sitecom */
2672 {USB_DEVICE(0x0df6, 0x0031)},
2673 {USB_DEVICE(0x0df6, 0x004b)}, /* WL-349 */
2674 @@ -127,6 +129,8 @@ static struct usb_device_id rtl8192_usb_id_tbl[] = {
2675 {USB_DEVICE(0x2001, 0x3301)},
2676 /* Zinwell */
2677 {USB_DEVICE(0x5a57, 0x0290)},
2678 + /* Guillemot */
2679 + {USB_DEVICE(0x06f8, 0xe031)},
2680 //92SU
2681 {USB_DEVICE(0x0bda, 0x8172)},
2682 {}
2683 diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
2684 index 34fc7bb..d784a8b 100644
2685 --- a/drivers/usb/core/driver.c
2686 +++ b/drivers/usb/core/driver.c
2687 @@ -1743,9 +1743,6 @@ int usb_external_resume_device(struct usb_device *udev, pm_message_t msg)
2688
2689 static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
2690 {
2691 - int w, i;
2692 - struct usb_interface *intf;
2693 -
2694 /* Remote wakeup is needed only when we actually go to sleep.
2695 * For things like FREEZE and QUIESCE, if the device is already
2696 * autosuspended then its current wakeup setting is okay.
2697 @@ -1755,18 +1752,10 @@ static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
2698 return;
2699 }
2700
2701 - /* If remote wakeup is permitted, see whether any interface drivers
2702 + /* Allow remote wakeup if it is enabled, even if no interface drivers
2703 * actually want it.
2704 */
2705 - w = 0;
2706 - if (device_may_wakeup(&udev->dev) && udev->actconfig) {
2707 - for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
2708 - intf = udev->actconfig->interface[i];
2709 - w |= intf->needs_remote_wakeup;
2710 - }
2711 - }
2712 -
2713 - udev->do_remote_wakeup = w;
2714 + udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
2715 }
2716
2717 int usb_suspend(struct device *dev, pm_message_t msg)
2718 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
2719 index ab93918..a61f160 100644
2720 --- a/drivers/usb/core/quirks.c
2721 +++ b/drivers/usb/core/quirks.c
2722 @@ -41,6 +41,10 @@ static const struct usb_device_id usb_quirk_list[] = {
2723 /* Philips PSC805 audio device */
2724 { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
2725
2726 + /* Artisman Watchdog Dongle */
2727 + { USB_DEVICE(0x04b4, 0x0526), .driver_info =
2728 + USB_QUIRK_CONFIG_INTF_STRINGS },
2729 +
2730 /* Roland SC-8820 */
2731 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
2732
2733 @@ -64,6 +68,9 @@ static const struct usb_device_id usb_quirk_list[] = {
2734 /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
2735 { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
2736
2737 + /* Broadcom BCM92035DGROM BT dongle */
2738 + { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
2739 +
2740 /* Action Semiconductor flash disk */
2741 { USB_DEVICE(0x10d6, 0x2200), .driver_info =
2742 USB_QUIRK_STRING_FETCH_255 },
2743 diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
2744 index adf8260..9e5f9f1 100644
2745 --- a/drivers/usb/gadget/u_serial.c
2746 +++ b/drivers/usb/gadget/u_serial.c
2747 @@ -535,17 +535,11 @@ recycle:
2748 list_move(&req->list, &port->read_pool);
2749 }
2750
2751 - /* Push from tty to ldisc; this is immediate with low_latency, and
2752 - * may trigger callbacks to this driver ... so drop the spinlock.
2753 + /* Push from tty to ldisc; without low_latency set this is handled by
2754 + * a workqueue, so we won't get callbacks and can hold port_lock
2755 */
2756 if (tty && do_push) {
2757 - spin_unlock_irq(&port->port_lock);
2758 tty_flip_buffer_push(tty);
2759 - wake_up_interruptible(&tty->read_wait);
2760 - spin_lock_irq(&port->port_lock);
2761 -
2762 - /* tty may have been closed */
2763 - tty = port->port_tty;
2764 }
2765
2766
2767 @@ -783,11 +777,6 @@ static int gs_open(struct tty_struct *tty, struct file *file)
2768 port->open_count = 1;
2769 port->openclose = false;
2770
2771 - /* low_latency means ldiscs work in tasklet context, without
2772 - * needing a workqueue schedule ... easier to keep up.
2773 - */
2774 - tty->low_latency = 1;
2775 -
2776 /* if connected, start the I/O stream */
2777 if (port->port_usb) {
2778 struct gserial *gser = port->port_usb;
2779 @@ -1194,6 +1183,7 @@ void gserial_cleanup(void)
2780 n_ports = 0;
2781
2782 tty_unregister_driver(gs_tty_driver);
2783 + put_tty_driver(gs_tty_driver);
2784 gs_tty_driver = NULL;
2785
2786 pr_debug("%s: cleaned up ttyGS* support\n", __func__);
2787 diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
2788 index 8b37a4b..be41ec3 100644
2789 --- a/drivers/usb/misc/sisusbvga/sisusb.c
2790 +++ b/drivers/usb/misc/sisusbvga/sisusb.c
2791 @@ -2435,7 +2435,8 @@ sisusb_open(struct inode *inode, struct file *file)
2792 }
2793
2794 if (!sisusb->devinit) {
2795 - if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH) {
2796 + if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH ||
2797 + sisusb->sisusb_dev->speed == USB_SPEED_SUPER) {
2798 if (sisusb_init_gfxdevice(sisusb, 0)) {
2799 mutex_unlock(&sisusb->lock);
2800 dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n");
2801 @@ -3167,7 +3168,7 @@ static int sisusb_probe(struct usb_interface *intf,
2802
2803 sisusb->present = 1;
2804
2805 - if (dev->speed == USB_SPEED_HIGH) {
2806 + if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) {
2807 int initscreen = 1;
2808 #ifdef INCL_SISUSB_CON
2809 if (sisusb_first_vc > 0 &&
2810 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2811 index bf56be1..8de8572 100644
2812 --- a/drivers/usb/serial/ftdi_sio.c
2813 +++ b/drivers/usb/serial/ftdi_sio.c
2814 @@ -697,6 +697,7 @@ static struct usb_device_id id_table_combined [] = {
2815 { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
2816 .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
2817 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
2818 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
2819 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
2820 { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
2821 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
2822 @@ -743,6 +744,14 @@ static struct usb_device_id id_table_combined [] = {
2823 { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
2824 { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) },
2825 { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) },
2826 + { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_ST_PID),
2827 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2828 + { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SLITE_PID),
2829 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2830 + { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH2_PID),
2831 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2832 + { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
2833 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2834 { }, /* Optional parameter entry */
2835 { } /* Terminating entry */
2836 };
2837 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2838 index 8f9e805..ffdcec7 100644
2839 --- a/drivers/usb/serial/ftdi_sio_ids.h
2840 +++ b/drivers/usb/serial/ftdi_sio_ids.h
2841 @@ -703,6 +703,12 @@
2842 #define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */
2843
2844 /*
2845 + * RT Systems programming cables for various ham radios
2846 + */
2847 +#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
2848 +#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
2849 +
2850 +/*
2851 * Bayer Ascensia Contour blood glucose meter USB-converter cable.
2852 * http://winglucofacts.com/cables/
2853 */
2854 @@ -1024,3 +1030,12 @@
2855 #define MJSG_SR_RADIO_PID 0x9379
2856 #define MJSG_XM_RADIO_PID 0x937A
2857 #define MJSG_HD_RADIO_PID 0x937C
2858 +
2859 +/*
2860 + * Xverve Signalyzer tools (http://www.signalyzer.com/)
2861 + */
2862 +#define XVERVE_SIGNALYZER_ST_PID 0xBCA0
2863 +#define XVERVE_SIGNALYZER_SLITE_PID 0xBCA1
2864 +#define XVERVE_SIGNALYZER_SH2_PID 0xBCA2
2865 +#define XVERVE_SIGNALYZER_SH4_PID 0xBCA4
2866 +
2867 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2868 index fac0732..2586023 100644
2869 --- a/drivers/usb/serial/option.c
2870 +++ b/drivers/usb/serial/option.c
2871 @@ -226,6 +226,7 @@ static int option_resume(struct usb_serial *serial);
2872 #define AMOI_PRODUCT_H01 0x0800
2873 #define AMOI_PRODUCT_H01A 0x7002
2874 #define AMOI_PRODUCT_H02 0x0802
2875 +#define AMOI_PRODUCT_SKYPEPHONE_S2 0x0407
2876
2877 #define DELL_VENDOR_ID 0x413C
2878
2879 @@ -316,6 +317,7 @@ static int option_resume(struct usb_serial *serial);
2880 #define QISDA_PRODUCT_H21_4512 0x4512
2881 #define QISDA_PRODUCT_H21_4523 0x4523
2882 #define QISDA_PRODUCT_H20_4515 0x4515
2883 +#define QISDA_PRODUCT_H20_4518 0x4518
2884 #define QISDA_PRODUCT_H20_4519 0x4519
2885
2886 /* TLAYTECH PRODUCTS */
2887 @@ -503,6 +505,7 @@ static struct usb_device_id option_ids[] = {
2888 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
2889 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
2890 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) },
2891 + { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_SKYPEPHONE_S2) },
2892
2893 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */
2894 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5500_MINICARD) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
2895 @@ -836,6 +839,7 @@ static struct usb_device_id option_ids[] = {
2896 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
2897 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) },
2898 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) },
2899 + { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4518) },
2900 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4519) },
2901 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
2902 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
2903 diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
2904 index 54f8494..328578b 100644
2905 --- a/drivers/usb/serial/sierra.c
2906 +++ b/drivers/usb/serial/sierra.c
2907 @@ -210,6 +210,7 @@ static struct usb_device_id id_table [] = {
2908 { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
2909 { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
2910 { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */
2911 + { USB_DEVICE(0x1199, 0x0301) }, /* Sierra Wireless USB Dongle 250U */
2912 /* Sierra Wireless C597 */
2913 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) },
2914 /* Sierra Wireless T598 */
2915 diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
2916 index 7127bfe..d43859f 100644
2917 --- a/drivers/virtio/virtio_pci.c
2918 +++ b/drivers/virtio/virtio_pci.c
2919 @@ -635,6 +635,9 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
2920 INIT_LIST_HEAD(&vp_dev->virtqueues);
2921 spin_lock_init(&vp_dev->lock);
2922
2923 + /* Disable MSI/MSIX to bring device to a known good state. */
2924 + pci_msi_off(pci_dev);
2925 +
2926 /* enable the device */
2927 err = pci_enable_device(pci_dev);
2928 if (err)
2929 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2930 index 64f6b2f..b9840fa 100644
2931 --- a/fs/btrfs/ioctl.c
2932 +++ b/fs/btrfs/ioctl.c
2933 @@ -947,7 +947,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2934 */
2935
2936 /* the destination must be opened for writing */
2937 - if (!(file->f_mode & FMODE_WRITE))
2938 + if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND))
2939 return -EINVAL;
2940
2941 ret = mnt_want_write(file->f_path.mnt);
2942 @@ -1000,7 +1000,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2943
2944 /* determine range to clone */
2945 ret = -EINVAL;
2946 - if (off >= src->i_size || off + len > src->i_size)
2947 + if (off + len > src->i_size || off + len < off)
2948 goto out_unlock;
2949 if (len == 0)
2950 olen = len = src->i_size - off;
2951 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
2952 index 29f1da7..1445407 100644
2953 --- a/fs/cifs/cifsfs.c
2954 +++ b/fs/cifs/cifsfs.c
2955 @@ -1033,7 +1033,7 @@ init_cifs(void)
2956 goto out_unregister_filesystem;
2957 #endif
2958 #ifdef CONFIG_CIFS_DFS_UPCALL
2959 - rc = register_key_type(&key_type_dns_resolver);
2960 + rc = cifs_init_dns_resolver();
2961 if (rc)
2962 goto out_unregister_key_type;
2963 #endif
2964 @@ -1045,7 +1045,7 @@ init_cifs(void)
2965
2966 out_unregister_resolver_key:
2967 #ifdef CONFIG_CIFS_DFS_UPCALL
2968 - unregister_key_type(&key_type_dns_resolver);
2969 + cifs_exit_dns_resolver();
2970 out_unregister_key_type:
2971 #endif
2972 #ifdef CONFIG_CIFS_UPCALL
2973 @@ -1071,7 +1071,7 @@ exit_cifs(void)
2974 cifs_proc_clean();
2975 #ifdef CONFIG_CIFS_DFS_UPCALL
2976 cifs_dfs_release_automount_timer();
2977 - unregister_key_type(&key_type_dns_resolver);
2978 + cifs_exit_dns_resolver();
2979 #endif
2980 #ifdef CONFIG_CIFS_UPCALL
2981 unregister_key_type(&cifs_spnego_key_type);
2982 diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
2983 index 8794814..16f31c1 100644
2984 --- a/fs/cifs/dns_resolve.c
2985 +++ b/fs/cifs/dns_resolve.c
2986 @@ -23,12 +23,16 @@
2987 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
2988 */
2989
2990 +#include <linux/keyctl.h>
2991 +#include <linux/key-type.h>
2992 #include <keys/user-type.h>
2993 #include "dns_resolve.h"
2994 #include "cifsglob.h"
2995 #include "cifsproto.h"
2996 #include "cifs_debug.h"
2997
2998 +static const struct cred *dns_resolver_cache;
2999 +
3000 /* Checks if supplied name is IP address
3001 * returns:
3002 * 1 - name is IP
3003 @@ -93,6 +97,7 @@ struct key_type key_type_dns_resolver = {
3004 int
3005 dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
3006 {
3007 + const struct cred *saved_cred;
3008 int rc = -EAGAIN;
3009 struct key *rkey = ERR_PTR(-EAGAIN);
3010 char *name;
3011 @@ -132,8 +137,15 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
3012 goto skip_upcall;
3013 }
3014
3015 + saved_cred = override_creds(dns_resolver_cache);
3016 rkey = request_key(&key_type_dns_resolver, name, "");
3017 + revert_creds(saved_cred);
3018 if (!IS_ERR(rkey)) {
3019 + if (!(rkey->perm & KEY_USR_VIEW)) {
3020 + down_read(&rkey->sem);
3021 + rkey->perm |= KEY_USR_VIEW;
3022 + up_read(&rkey->sem);
3023 + }
3024 len = rkey->type_data.x[0];
3025 data = rkey->payload.data;
3026 } else {
3027 @@ -164,4 +176,61 @@ out:
3028 return rc;
3029 }
3030
3031 +int __init cifs_init_dns_resolver(void)
3032 +{
3033 + struct cred *cred;
3034 + struct key *keyring;
3035 + int ret;
3036 +
3037 + printk(KERN_NOTICE "Registering the %s key type\n",
3038 + key_type_dns_resolver.name);
3039 +
3040 + /* create an override credential set with a special thread keyring in
3041 + * which DNS requests are cached
3042 + *
3043 + * this is used to prevent malicious redirections from being installed
3044 + * with add_key().
3045 + */
3046 + cred = prepare_kernel_cred(NULL);
3047 + if (!cred)
3048 + return -ENOMEM;
3049 +
3050 + keyring = key_alloc(&key_type_keyring, ".dns_resolver", 0, 0, cred,
3051 + (KEY_POS_ALL & ~KEY_POS_SETATTR) |
3052 + KEY_USR_VIEW | KEY_USR_READ,
3053 + KEY_ALLOC_NOT_IN_QUOTA);
3054 + if (IS_ERR(keyring)) {
3055 + ret = PTR_ERR(keyring);
3056 + goto failed_put_cred;
3057 + }
3058 +
3059 + ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
3060 + if (ret < 0)
3061 + goto failed_put_key;
3062 +
3063 + ret = register_key_type(&key_type_dns_resolver);
3064 + if (ret < 0)
3065 + goto failed_put_key;
3066 +
3067 + /* instruct request_key() to use this special keyring as a cache for
3068 + * the results it looks up */
3069 + cred->thread_keyring = keyring;
3070 + cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
3071 + dns_resolver_cache = cred;
3072 + return 0;
3073 +
3074 +failed_put_key:
3075 + key_put(keyring);
3076 +failed_put_cred:
3077 + put_cred(cred);
3078 + return ret;
3079 +}
3080
3081 +void __exit cifs_exit_dns_resolver(void)
3082 +{
3083 + key_revoke(dns_resolver_cache->thread_keyring);
3084 + unregister_key_type(&key_type_dns_resolver);
3085 + put_cred(dns_resolver_cache);
3086 + printk(KERN_NOTICE "Unregistered %s key type\n",
3087 + key_type_dns_resolver.name);
3088 +}
3089 diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h
3090 index 966e928..26b9eaa 100644
3091 --- a/fs/cifs/dns_resolve.h
3092 +++ b/fs/cifs/dns_resolve.h
3093 @@ -24,8 +24,8 @@
3094 #define _DNS_RESOLVE_H
3095
3096 #ifdef __KERNEL__
3097 -#include <linux/key-type.h>
3098 -extern struct key_type key_type_dns_resolver;
3099 +extern int __init cifs_init_dns_resolver(void);
3100 +extern void __exit cifs_exit_dns_resolver(void);
3101 extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr);
3102 #endif /* KERNEL */
3103
3104 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
3105 index a104ca3..303fd7f 100644
3106 --- a/fs/cifs/inode.c
3107 +++ b/fs/cifs/inode.c
3108 @@ -1284,6 +1284,10 @@ cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath,
3109 if (rc == 0 || rc != -ETXTBSY)
3110 return rc;
3111
3112 + /* open-file renames don't work across directories */
3113 + if (to_dentry->d_parent != from_dentry->d_parent)
3114 + return rc;
3115 +
3116 /* open the file to be renamed -- we need DELETE perms */
3117 rc = CIFSSMBOpen(xid, pTcon, fromPath, FILE_OPEN, DELETE,
3118 CREATE_NOT_DIR, &srcfid, &oplock, NULL,
3119 diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
3120 index 7085a62..6d6ff4f 100644
3121 --- a/fs/cifs/sess.c
3122 +++ b/fs/cifs/sess.c
3123 @@ -723,15 +723,7 @@ ssetup_ntlmssp_authenticate:
3124
3125 /* calculate session key */
3126 setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
3127 - if (first_time) /* should this be moved into common code
3128 - with similar ntlmv2 path? */
3129 - /* cifs_calculate_ntlmv2_mac_key(ses->server->mac_signing_key,
3130 - response BB FIXME, v2_sess_key); */
3131 -
3132 - /* copy session key */
3133 -
3134 - /* memcpy(bcc_ptr, (char *)ntlm_session_key,LM2_SESS_KEY_SIZE);
3135 - bcc_ptr += LM2_SESS_KEY_SIZE; */
3136 + /* FIXME: calculate MAC key */
3137 memcpy(bcc_ptr, (char *)v2_sess_key,
3138 sizeof(struct ntlmv2_resp));
3139 bcc_ptr += sizeof(struct ntlmv2_resp);
3140 diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
3141 index f1c17e8..3dfe7ce 100644
3142 --- a/fs/ecryptfs/messaging.c
3143 +++ b/fs/ecryptfs/messaging.c
3144 @@ -30,9 +30,9 @@ static struct mutex ecryptfs_msg_ctx_lists_mux;
3145
3146 static struct hlist_head *ecryptfs_daemon_hash;
3147 struct mutex ecryptfs_daemon_hash_mux;
3148 -static int ecryptfs_hash_buckets;
3149 +static int ecryptfs_hash_bits;
3150 #define ecryptfs_uid_hash(uid) \
3151 - hash_long((unsigned long)uid, ecryptfs_hash_buckets)
3152 + hash_long((unsigned long)uid, ecryptfs_hash_bits)
3153
3154 static u32 ecryptfs_msg_counter;
3155 static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr;
3156 @@ -485,18 +485,19 @@ int ecryptfs_init_messaging(void)
3157 }
3158 mutex_init(&ecryptfs_daemon_hash_mux);
3159 mutex_lock(&ecryptfs_daemon_hash_mux);
3160 - ecryptfs_hash_buckets = 1;
3161 - while (ecryptfs_number_of_users >> ecryptfs_hash_buckets)
3162 - ecryptfs_hash_buckets++;
3163 + ecryptfs_hash_bits = 1;
3164 + while (ecryptfs_number_of_users >> ecryptfs_hash_bits)
3165 + ecryptfs_hash_bits++;
3166 ecryptfs_daemon_hash = kmalloc((sizeof(struct hlist_head)
3167 - * ecryptfs_hash_buckets), GFP_KERNEL);
3168 + * (1 << ecryptfs_hash_bits)),
3169 + GFP_KERNEL);
3170 if (!ecryptfs_daemon_hash) {
3171 rc = -ENOMEM;
3172 printk(KERN_ERR "%s: Failed to allocate memory\n", __func__);
3173 mutex_unlock(&ecryptfs_daemon_hash_mux);
3174 goto out;
3175 }
3176 - for (i = 0; i < ecryptfs_hash_buckets; i++)
3177 + for (i = 0; i < (1 << ecryptfs_hash_bits); i++)
3178 INIT_HLIST_HEAD(&ecryptfs_daemon_hash[i]);
3179 mutex_unlock(&ecryptfs_daemon_hash_mux);
3180 ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx)
3181 @@ -553,7 +554,7 @@ void ecryptfs_release_messaging(void)
3182 int i;
3183
3184 mutex_lock(&ecryptfs_daemon_hash_mux);
3185 - for (i = 0; i < ecryptfs_hash_buckets; i++) {
3186 + for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
3187 int rc;
3188
3189 hlist_for_each_entry(daemon, elem,
3190 diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
3191 index 9dc93168..aa6fb6b 100644
3192 --- a/fs/ext4/dir.c
3193 +++ b/fs/ext4/dir.c
3194 @@ -84,9 +84,11 @@ int ext4_check_dir_entry(const char *function, struct inode *dir,
3195
3196 if (error_msg != NULL)
3197 ext4_error(dir->i_sb, function,
3198 - "bad entry in directory #%lu: %s - "
3199 - "offset=%u, inode=%u, rec_len=%d, name_len=%d",
3200 - dir->i_ino, error_msg, offset,
3201 + "bad entry in directory #%lu: %s - block=%llu"
3202 + "offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
3203 + dir->i_ino, error_msg,
3204 + (unsigned long long) bh->b_blocknr,
3205 + (unsigned) (offset%bh->b_size), offset,
3206 le32_to_cpu(de->inode),
3207 rlen, de->name_len);
3208 return error_msg == NULL ? 1 : 0;
3209 @@ -109,7 +111,7 @@ static int ext4_readdir(struct file *filp,
3210
3211 if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
3212 EXT4_FEATURE_COMPAT_DIR_INDEX) &&
3213 - ((EXT4_I(inode)->i_flags & EXT4_INDEX_FL) ||
3214 + ((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
3215 ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
3216 err = ext4_dx_readdir(filp, dirent, filldir);
3217 if (err != ERR_BAD_DX_DIR) {
3218 @@ -120,7 +122,7 @@ static int ext4_readdir(struct file *filp,
3219 * We don't set the inode dirty flag since it's not
3220 * critical that it get flushed back to the disk.
3221 */
3222 - EXT4_I(filp->f_path.dentry->d_inode)->i_flags &= ~EXT4_INDEX_FL;
3223 + ext4_clear_inode_flag(filp->f_path.dentry->d_inode, EXT4_INODE_INDEX);
3224 }
3225 stored = 0;
3226 offset = filp->f_pos & (sb->s_blocksize - 1);
3227 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3228 index 4a825c1..fa6b79f 100644
3229 --- a/fs/ext4/ext4.h
3230 +++ b/fs/ext4/ext4.h
3231 @@ -29,6 +29,9 @@
3232 #include <linux/wait.h>
3233 #include <linux/blockgroup_lock.h>
3234 #include <linux/percpu_counter.h>
3235 +#ifdef __KERNEL__
3236 +#include <linux/compat.h>
3237 +#endif
3238
3239 /*
3240 * The fourth extended filesystem constants/structures
3241 @@ -284,10 +287,12 @@ struct flex_groups {
3242 #define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
3243 #define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */
3244 #define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */
3245 +#define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */
3246 +#define EXT4_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */
3247 #define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
3248
3249 -#define EXT4_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
3250 -#define EXT4_FL_USER_MODIFIABLE 0x000B80FF /* User modifiable flags */
3251 +#define EXT4_FL_USER_VISIBLE 0x004BDFFF /* User visible flags */
3252 +#define EXT4_FL_USER_MODIFIABLE 0x004B80FF /* User modifiable flags */
3253
3254 /* Flags that should be inherited by new inodes from their parent. */
3255 #define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\
3256 @@ -314,15 +319,81 @@ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
3257 }
3258
3259 /*
3260 - * Inode dynamic state flags
3261 + * Inode flags used for atomic set/get
3262 + */
3263 +enum {
3264 + EXT4_INODE_SECRM = 0, /* Secure deletion */
3265 + EXT4_INODE_UNRM = 1, /* Undelete */
3266 + EXT4_INODE_COMPR = 2, /* Compress file */
3267 + EXT4_INODE_SYNC = 3, /* Synchronous updates */
3268 + EXT4_INODE_IMMUTABLE = 4, /* Immutable file */
3269 + EXT4_INODE_APPEND = 5, /* writes to file may only append */
3270 + EXT4_INODE_NODUMP = 6, /* do not dump file */
3271 + EXT4_INODE_NOATIME = 7, /* do not update atime */
3272 +/* Reserved for compression usage... */
3273 + EXT4_INODE_DIRTY = 8,
3274 + EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */
3275 + EXT4_INODE_NOCOMPR = 10, /* Don't compress */
3276 + EXT4_INODE_ECOMPR = 11, /* Compression error */
3277 +/* End compression flags --- maybe not all used */
3278 + EXT4_INODE_INDEX = 12, /* hash-indexed directory */
3279 + EXT4_INODE_IMAGIC = 13, /* AFS directory */
3280 + EXT4_INODE_JOURNAL_DATA = 14, /* file data should be journaled */
3281 + EXT4_INODE_NOTAIL = 15, /* file tail should not be merged */
3282 + EXT4_INODE_DIRSYNC = 16, /* dirsync behaviour (directories only) */
3283 + EXT4_INODE_TOPDIR = 17, /* Top of directory hierarchies*/
3284 + EXT4_INODE_HUGE_FILE = 18, /* Set to each huge file */
3285 + EXT4_INODE_EXTENTS = 19, /* Inode uses extents */
3286 + EXT4_INODE_EA_INODE = 21, /* Inode used for large EA */
3287 + EXT4_INODE_EOFBLOCKS = 22, /* Blocks allocated beyond EOF */
3288 + EXT4_INODE_RESERVED = 31, /* reserved for ext4 lib */
3289 +};
3290 +
3291 +#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG))
3292 +#define CHECK_FLAG_VALUE(FLAG) if (!TEST_FLAG_VALUE(FLAG)) { \
3293 + printk(KERN_EMERG "EXT4 flag fail: " #FLAG ": %d %d\n", \
3294 + EXT4_##FLAG##_FL, EXT4_INODE_##FLAG); BUG_ON(1); }
3295 +
3296 +/*
3297 + * Since it's pretty easy to mix up bit numbers and hex values, and we
3298 + * can't do a compile-time test for ENUM values, we use a run-time
3299 + * test to make sure that EXT4_XXX_FL is consistent with respect to
3300 + * EXT4_INODE_XXX. If all is well the printk and BUG_ON will all drop
3301 + * out so it won't cost any extra space in the compiled kernel image.
3302 + * But it's important that these values are the same, since we are
3303 + * using EXT4_INODE_XXX to test for the flag values, but EXT4_XX_FL
3304 + * must be consistent with the values of FS_XXX_FL defined in
3305 + * include/linux/fs.h and the on-disk values found in ext2, ext3, and
3306 + * ext4 filesystems, and of course the values defined in e2fsprogs.
3307 + *
3308 + * It's not paranoia if the Murphy's Law really *is* out to get you. :-)
3309 */
3310 -#define EXT4_STATE_JDATA 0x00000001 /* journaled data exists */
3311 -#define EXT4_STATE_NEW 0x00000002 /* inode is newly created */
3312 -#define EXT4_STATE_XATTR 0x00000004 /* has in-inode xattrs */
3313 -#define EXT4_STATE_NO_EXPAND 0x00000008 /* No space for expansion */
3314 -#define EXT4_STATE_DA_ALLOC_CLOSE 0x00000010 /* Alloc DA blks on close */
3315 -#define EXT4_STATE_EXT_MIGRATE 0x00000020 /* Inode is migrating */
3316 -#define EXT4_STATE_DIO_UNWRITTEN 0x00000040 /* need convert on dio done*/
3317 +static inline void ext4_check_flag_values(void)
3318 +{
3319 + CHECK_FLAG_VALUE(SECRM);
3320 + CHECK_FLAG_VALUE(UNRM);
3321 + CHECK_FLAG_VALUE(COMPR);
3322 + CHECK_FLAG_VALUE(SYNC);
3323 + CHECK_FLAG_VALUE(IMMUTABLE);
3324 + CHECK_FLAG_VALUE(APPEND);
3325 + CHECK_FLAG_VALUE(NODUMP);
3326 + CHECK_FLAG_VALUE(NOATIME);
3327 + CHECK_FLAG_VALUE(DIRTY);
3328 + CHECK_FLAG_VALUE(COMPRBLK);
3329 + CHECK_FLAG_VALUE(NOCOMPR);
3330 + CHECK_FLAG_VALUE(ECOMPR);
3331 + CHECK_FLAG_VALUE(INDEX);
3332 + CHECK_FLAG_VALUE(IMAGIC);
3333 + CHECK_FLAG_VALUE(JOURNAL_DATA);
3334 + CHECK_FLAG_VALUE(NOTAIL);
3335 + CHECK_FLAG_VALUE(DIRSYNC);
3336 + CHECK_FLAG_VALUE(TOPDIR);
3337 + CHECK_FLAG_VALUE(HUGE_FILE);
3338 + CHECK_FLAG_VALUE(EXTENTS);
3339 + CHECK_FLAG_VALUE(EA_INODE);
3340 + CHECK_FLAG_VALUE(EOFBLOCKS);
3341 + CHECK_FLAG_VALUE(RESERVED);
3342 +}
3343
3344 /* Used to pass group descriptor data when online resize is done */
3345 struct ext4_new_group_input {
3346 @@ -335,6 +406,18 @@ struct ext4_new_group_input {
3347 __u16 unused;
3348 };
3349
3350 +#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
3351 +struct compat_ext4_new_group_input {
3352 + u32 group;
3353 + compat_u64 block_bitmap;
3354 + compat_u64 inode_bitmap;
3355 + compat_u64 inode_table;
3356 + u32 blocks_count;
3357 + u16 reserved_blocks;
3358 + u16 unused;
3359 +};
3360 +#endif
3361 +
3362 /* The struct ext4_new_group_input in kernel space, with free_blocks_count */
3363 struct ext4_new_group_data {
3364 __u32 group;
3365 @@ -361,14 +444,11 @@ struct ext4_new_group_data {
3366 so set the magic i_delalloc_reserve_flag after taking the
3367 inode allocation semaphore for */
3368 #define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004
3369 - /* Call ext4_da_update_reserve_space() after successfully
3370 - allocating the blocks */
3371 -#define EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE 0x0008
3372 /* caller is from the direct IO path, request to creation of an
3373 unitialized extents if not allocated, split the uninitialized
3374 extent if blocks has been preallocated already*/
3375 -#define EXT4_GET_BLOCKS_DIO 0x0010
3376 -#define EXT4_GET_BLOCKS_CONVERT 0x0020
3377 +#define EXT4_GET_BLOCKS_DIO 0x0008
3378 +#define EXT4_GET_BLOCKS_CONVERT 0x0010
3379 #define EXT4_GET_BLOCKS_DIO_CREATE_EXT (EXT4_GET_BLOCKS_DIO|\
3380 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
3381 /* Convert extent to initialized after direct IO complete */
3382 @@ -397,6 +477,7 @@ struct ext4_new_group_data {
3383 #define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12)
3384 #define EXT4_IOC_MOVE_EXT _IOWR('f', 15, struct move_extent)
3385
3386 +#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
3387 /*
3388 * ioctl commands in 32 bit emulation
3389 */
3390 @@ -407,11 +488,13 @@ struct ext4_new_group_data {
3391 #define EXT4_IOC32_GETRSVSZ _IOR('f', 5, int)
3392 #define EXT4_IOC32_SETRSVSZ _IOW('f', 6, int)
3393 #define EXT4_IOC32_GROUP_EXTEND _IOW('f', 7, unsigned int)
3394 +#define EXT4_IOC32_GROUP_ADD _IOW('f', 8, struct compat_ext4_new_group_input)
3395 #ifdef CONFIG_JBD2_DEBUG
3396 #define EXT4_IOC32_WAIT_FOR_READONLY _IOR('f', 99, int)
3397 #endif
3398 #define EXT4_IOC32_GETVERSION_OLD FS_IOC32_GETVERSION
3399 #define EXT4_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION
3400 +#endif
3401
3402
3403 /*
3404 @@ -615,9 +698,8 @@ struct ext4_ext_cache {
3405 */
3406 struct ext4_inode_info {
3407 __le32 i_data[15]; /* unconverted */
3408 - __u32 i_flags;
3409 - ext4_fsblk_t i_file_acl;
3410 __u32 i_dtime;
3411 + ext4_fsblk_t i_file_acl;
3412
3413 /*
3414 * i_block_group is the number of the block group which contains
3415 @@ -627,7 +709,8 @@ struct ext4_inode_info {
3416 * near to their parent directory's inode.
3417 */
3418 ext4_group_t i_block_group;
3419 - __u32 i_state; /* Dynamic state flags for ext4 */
3420 + unsigned long i_state_flags; /* Dynamic state flags */
3421 + unsigned long i_flags;
3422
3423 ext4_lblk_t i_dir_start_lookup;
3424 #ifdef CONFIG_EXT4_FS_XATTR
3425 @@ -693,6 +776,8 @@ struct ext4_inode_info {
3426 unsigned int i_reserved_meta_blocks;
3427 unsigned int i_allocated_meta_blocks;
3428 unsigned short i_delalloc_reserved_flag;
3429 + sector_t i_da_metadata_calc_last_lblock;
3430 + int i_da_metadata_calc_len;
3431
3432 /* on-disk additional length */
3433 __u16 i_extra_isize;
3434 @@ -1045,6 +1130,37 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
3435 (ino >= EXT4_FIRST_INO(sb) &&
3436 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
3437 }
3438 +
3439 +/*
3440 + * Inode dynamic state flags
3441 + */
3442 +enum {
3443 + EXT4_STATE_JDATA, /* journaled data exists */
3444 + EXT4_STATE_NEW, /* inode is newly created */
3445 + EXT4_STATE_XATTR, /* has in-inode xattrs */
3446 + EXT4_STATE_NO_EXPAND, /* No space for expansion */
3447 + EXT4_STATE_DA_ALLOC_CLOSE, /* Alloc DA blks on close */
3448 + EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */
3449 + EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/
3450 + EXT4_STATE_NEWENTRY, /* File just added to dir */
3451 +};
3452 +
3453 +#define EXT4_INODE_BIT_FNS(name, field) \
3454 +static inline int ext4_test_inode_##name(struct inode *inode, int bit) \
3455 +{ \
3456 + return test_bit(bit, &EXT4_I(inode)->i_##field); \
3457 +} \
3458 +static inline void ext4_set_inode_##name(struct inode *inode, int bit) \
3459 +{ \
3460 + set_bit(bit, &EXT4_I(inode)->i_##field); \
3461 +} \
3462 +static inline void ext4_clear_inode_##name(struct inode *inode, int bit) \
3463 +{ \
3464 + clear_bit(bit, &EXT4_I(inode)->i_##field); \
3465 +}
3466 +
3467 +EXT4_INODE_BIT_FNS(flag, flags)
3468 +EXT4_INODE_BIT_FNS(state, state_flags)
3469 #else
3470 /* Assume that user mode programs are passing in an ext4fs superblock, not
3471 * a kernel struct super_block. This will allow us to call the feature-test
3472 @@ -1229,7 +1345,7 @@ struct ext4_dir_entry_2 {
3473
3474 #define is_dx(dir) (EXT4_HAS_COMPAT_FEATURE(dir->i_sb, \
3475 EXT4_FEATURE_COMPAT_DIR_INDEX) && \
3476 - (EXT4_I(dir)->i_flags & EXT4_INDEX_FL))
3477 + ext4_test_inode_flag((dir), EXT4_INODE_INDEX))
3478 #define EXT4_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT4_LINK_MAX)
3479 #define EXT4_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
3480
3481 @@ -1438,6 +1554,8 @@ extern int ext4_block_truncate_page(handle_t *handle,
3482 extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
3483 extern qsize_t *ext4_get_reserved_space(struct inode *inode);
3484 extern int flush_aio_dio_completed_IO(struct inode *inode);
3485 +extern void ext4_da_update_reserve_space(struct inode *inode,
3486 + int used, int quota_claim);
3487 /* ioctl.c */
3488 extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
3489 extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
3490 @@ -1637,6 +1755,7 @@ struct ext4_group_info {
3491 ext4_grpblk_t bb_first_free; /* first free block */
3492 ext4_grpblk_t bb_free; /* total free blocks */
3493 ext4_grpblk_t bb_fragments; /* nr of freespace fragments */
3494 + ext4_grpblk_t bb_largest_free_order;/* order of largest frag in BG */
3495 struct list_head bb_prealloc_list;
3496 #ifdef DOUBLE_CHECK
3497 void *bb_bitmap;
3498 diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
3499 index 2ca6864..bdb6ce7 100644
3500 --- a/fs/ext4/ext4_extents.h
3501 +++ b/fs/ext4/ext4_extents.h
3502 @@ -225,7 +225,8 @@ static inline void ext4_ext_mark_initialized(struct ext4_extent *ext)
3503 ext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ext));
3504 }
3505
3506 -extern int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks);
3507 +extern int ext4_ext_calc_metadata_amount(struct inode *inode,
3508 + sector_t lblocks);
3509 extern ext4_fsblk_t ext_pblock(struct ext4_extent *ex);
3510 extern ext4_fsblk_t idx_pblock(struct ext4_extent_idx *);
3511 extern void ext4_ext_store_pblock(struct ext4_extent *, ext4_fsblk_t);
3512 diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
3513 index 6a94099..496249a 100644
3514 --- a/fs/ext4/ext4_jbd2.c
3515 +++ b/fs/ext4/ext4_jbd2.c
3516 @@ -89,7 +89,7 @@ int __ext4_handle_dirty_metadata(const char *where, handle_t *handle,
3517 ext4_journal_abort_handle(where, __func__, bh,
3518 handle, err);
3519 } else {
3520 - if (inode && bh)
3521 + if (inode)
3522 mark_buffer_dirty_inode(bh, inode);
3523 else
3524 mark_buffer_dirty(bh);
3525 diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
3526 index 1892a77..386095d 100644
3527 --- a/fs/ext4/ext4_jbd2.h
3528 +++ b/fs/ext4/ext4_jbd2.h
3529 @@ -282,7 +282,7 @@ static inline int ext4_should_journal_data(struct inode *inode)
3530 return 1;
3531 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
3532 return 1;
3533 - if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
3534 + if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
3535 return 1;
3536 return 0;
3537 }
3538 @@ -293,7 +293,7 @@ static inline int ext4_should_order_data(struct inode *inode)
3539 return 0;
3540 if (!S_ISREG(inode->i_mode))
3541 return 0;
3542 - if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
3543 + if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
3544 return 0;
3545 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
3546 return 1;
3547 @@ -306,7 +306,7 @@ static inline int ext4_should_writeback_data(struct inode *inode)
3548 return 0;
3549 if (EXT4_JOURNAL(inode) == NULL)
3550 return 1;
3551 - if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
3552 + if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
3553 return 0;
3554 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
3555 return 1;
3556 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3557 index 9e21653..99482ea 100644
3558 --- a/fs/ext4/extents.c
3559 +++ b/fs/ext4/extents.c
3560 @@ -107,11 +107,8 @@ static int ext4_ext_truncate_extend_restart(handle_t *handle,
3561 if (err <= 0)
3562 return err;
3563 err = ext4_truncate_restart_trans(handle, inode, needed);
3564 - /*
3565 - * We have dropped i_data_sem so someone might have cached again
3566 - * an extent we are going to truncate.
3567 - */
3568 - ext4_ext_invalidate_cache(inode);
3569 + if (err == 0)
3570 + err = -EAGAIN;
3571
3572 return err;
3573 }
3574 @@ -296,29 +293,44 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
3575 * to allocate @blocks
3576 * Worse case is one block per extent
3577 */
3578 -int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks)
3579 +int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
3580 {
3581 - int lcap, icap, rcap, leafs, idxs, num;
3582 - int newextents = blocks;
3583 -
3584 - rcap = ext4_ext_space_root_idx(inode, 0);
3585 - lcap = ext4_ext_space_block(inode, 0);
3586 - icap = ext4_ext_space_block_idx(inode, 0);
3587 + struct ext4_inode_info *ei = EXT4_I(inode);
3588 + int idxs, num = 0;
3589
3590 - /* number of new leaf blocks needed */
3591 - num = leafs = (newextents + lcap - 1) / lcap;
3592 + idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
3593 + / sizeof(struct ext4_extent_idx));
3594
3595 /*
3596 - * Worse case, we need separate index block(s)
3597 - * to link all new leaf blocks
3598 + * If the new delayed allocation block is contiguous with the
3599 + * previous da block, it can share index blocks with the
3600 + * previous block, so we only need to allocate a new index
3601 + * block every idxs leaf blocks. At ldxs**2 blocks, we need
3602 + * an additional index block, and at ldxs**3 blocks, yet
3603 + * another index blocks.
3604 */
3605 - idxs = (leafs + icap - 1) / icap;
3606 - do {
3607 - num += idxs;
3608 - idxs = (idxs + icap - 1) / icap;
3609 - } while (idxs > rcap);
3610 + if (ei->i_da_metadata_calc_len &&
3611 + ei->i_da_metadata_calc_last_lblock+1 == lblock) {
3612 + if ((ei->i_da_metadata_calc_len % idxs) == 0)
3613 + num++;
3614 + if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
3615 + num++;
3616 + if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
3617 + num++;
3618 + ei->i_da_metadata_calc_len = 0;
3619 + } else
3620 + ei->i_da_metadata_calc_len++;
3621 + ei->i_da_metadata_calc_last_lblock++;
3622 + return num;
3623 + }
3624
3625 - return num;
3626 + /*
3627 + * In the worst case we need a new set of index blocks at
3628 + * every level of the inode's extent tree.
3629 + */
3630 + ei->i_da_metadata_calc_len = 1;
3631 + ei->i_da_metadata_calc_last_lblock = lblock;
3632 + return ext_depth(inode) + 1;
3633 }
3634
3635 static int
3636 @@ -2248,7 +2260,7 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
3637 int depth = ext_depth(inode);
3638 struct ext4_ext_path *path;
3639 handle_t *handle;
3640 - int i = 0, err = 0;
3641 + int i, err;
3642
3643 ext_debug("truncate since %u\n", start);
3644
3645 @@ -2257,23 +2269,26 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
3646 if (IS_ERR(handle))
3647 return PTR_ERR(handle);
3648
3649 +again:
3650 ext4_ext_invalidate_cache(inode);
3651
3652 /*
3653 * We start scanning from right side, freeing all the blocks
3654 * after i_size and walking into the tree depth-wise.
3655 */
3656 + depth = ext_depth(inode);
3657 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
3658 if (path == NULL) {
3659 ext4_journal_stop(handle);
3660 return -ENOMEM;
3661 }
3662 + path[0].p_depth = depth;
3663 path[0].p_hdr = ext_inode_hdr(inode);
3664 if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
3665 err = -EIO;
3666 goto out;
3667 }
3668 - path[0].p_depth = depth;
3669 + i = err = 0;
3670
3671 while (i >= 0 && err == 0) {
3672 if (i == depth) {
3673 @@ -2367,6 +2382,8 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
3674 out:
3675 ext4_ext_drop_refs(path);
3676 kfree(path);
3677 + if (err == -EAGAIN)
3678 + goto again;
3679 ext4_journal_stop(handle);
3680
3681 return err;
3682 @@ -2431,7 +2448,7 @@ static void bi_complete(struct bio *bio, int error)
3683 /* FIXME!! we need to try to merge to left or right after zero-out */
3684 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3685 {
3686 - int ret = -EIO;
3687 + int ret;
3688 struct bio *bio;
3689 int blkbits, blocksize;
3690 sector_t ee_pblock;
3691 @@ -2455,6 +2472,9 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3692 len = ee_len;
3693
3694 bio = bio_alloc(GFP_NOIO, len);
3695 + if (!bio)
3696 + return -ENOMEM;
3697 +
3698 bio->bi_sector = ee_pblock;
3699 bio->bi_bdev = inode->i_sb->s_bdev;
3700
3701 @@ -2482,17 +2502,15 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3702 submit_bio(WRITE, bio);
3703 wait_for_completion(&event);
3704
3705 - if (test_bit(BIO_UPTODATE, &bio->bi_flags))
3706 - ret = 0;
3707 - else {
3708 - ret = -EIO;
3709 - break;
3710 + if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
3711 + bio_put(bio);
3712 + return -EIO;
3713 }
3714 bio_put(bio);
3715 ee_len -= done;
3716 ee_pblock += done << (blkbits - 9);
3717 }
3718 - return ret;
3719 + return 0;
3720 }
3721
3722 #define EXT4_EXT_ZERO_LEN 7
3723 @@ -2517,11 +2535,21 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
3724 struct ext4_extent *ex2 = NULL;
3725 struct ext4_extent *ex3 = NULL;
3726 struct ext4_extent_header *eh;
3727 - ext4_lblk_t ee_block;
3728 + ext4_lblk_t ee_block, eof_block;
3729 unsigned int allocated, ee_len, depth;
3730 ext4_fsblk_t newblock;
3731 int err = 0;
3732 int ret = 0;
3733 + int may_zeroout;
3734 +
3735 + ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
3736 + "block %llu, max_blocks %u\n", inode->i_ino,
3737 + (unsigned long long)iblock, max_blocks);
3738 +
3739 + eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3740 + inode->i_sb->s_blocksize_bits;
3741 + if (eof_block < iblock + max_blocks)
3742 + eof_block = iblock + max_blocks;
3743
3744 depth = ext_depth(inode);
3745 eh = path[depth].p_hdr;
3746 @@ -2530,16 +2558,23 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
3747 ee_len = ext4_ext_get_actual_len(ex);
3748 allocated = ee_len - (iblock - ee_block);
3749 newblock = iblock - ee_block + ext_pblock(ex);
3750 +
3751 ex2 = ex;
3752 orig_ex.ee_block = ex->ee_block;
3753 orig_ex.ee_len = cpu_to_le16(ee_len);
3754 ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
3755
3756 + /*
3757 + * It is safe to convert extent to initialized via explicit
3758 + * zeroout only if extent is fully insde i_size or new_size.
3759 + */
3760 + may_zeroout = ee_block + ee_len <= eof_block;
3761 +
3762 err = ext4_ext_get_access(handle, inode, path + depth);
3763 if (err)
3764 goto out;
3765 /* If extent has less than 2*EXT4_EXT_ZERO_LEN zerout directly */
3766 - if (ee_len <= 2*EXT4_EXT_ZERO_LEN) {
3767 + if (ee_len <= 2*EXT4_EXT_ZERO_LEN && may_zeroout) {
3768 err = ext4_ext_zeroout(inode, &orig_ex);
3769 if (err)
3770 goto fix_extent_len;
3771 @@ -2570,7 +2605,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
3772 if (allocated > max_blocks) {
3773 unsigned int newdepth;
3774 /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
3775 - if (allocated <= EXT4_EXT_ZERO_LEN) {
3776 + if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
3777 /*
3778 * iblock == ee_block is handled by the zerouout
3779 * at the beginning.
3780 @@ -2646,7 +2681,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
3781 ex3->ee_len = cpu_to_le16(allocated - max_blocks);
3782 ext4_ext_mark_uninitialized(ex3);
3783 err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
3784 - if (err == -ENOSPC) {
3785 + if (err == -ENOSPC && may_zeroout) {
3786 err = ext4_ext_zeroout(inode, &orig_ex);
3787 if (err)
3788 goto fix_extent_len;
3789 @@ -2670,8 +2705,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
3790 * update the extent length after successful insert of the
3791 * split extent
3792 */
3793 - orig_ex.ee_len = cpu_to_le16(ee_len -
3794 - ext4_ext_get_actual_len(ex3));
3795 + ee_len -= ext4_ext_get_actual_len(ex3);
3796 + orig_ex.ee_len = cpu_to_le16(ee_len);
3797 + may_zeroout = ee_block + ee_len <= eof_block;
3798 +
3799 depth = newdepth;
3800 ext4_ext_drop_refs(path);
3801 path = ext4_ext_find_extent(inode, iblock, path);
3802 @@ -2695,7 +2732,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
3803 * otherwise give the extent a chance to merge to left
3804 */
3805 if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
3806 - iblock != ee_block) {
3807 + iblock != ee_block && may_zeroout) {
3808 err = ext4_ext_zeroout(inode, &orig_ex);
3809 if (err)
3810 goto fix_extent_len;
3811 @@ -2764,7 +2801,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
3812 goto out;
3813 insert:
3814 err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
3815 - if (err == -ENOSPC) {
3816 + if (err == -ENOSPC && may_zeroout) {
3817 err = ext4_ext_zeroout(inode, &orig_ex);
3818 if (err)
3819 goto fix_extent_len;
3820 @@ -2824,14 +2861,21 @@ static int ext4_split_unwritten_extents(handle_t *handle,
3821 struct ext4_extent *ex2 = NULL;
3822 struct ext4_extent *ex3 = NULL;
3823 struct ext4_extent_header *eh;
3824 - ext4_lblk_t ee_block;
3825 + ext4_lblk_t ee_block, eof_block;
3826 unsigned int allocated, ee_len, depth;
3827 ext4_fsblk_t newblock;
3828 int err = 0;
3829 + int may_zeroout;
3830 +
3831 + ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
3832 + "block %llu, max_blocks %u\n", inode->i_ino,
3833 + (unsigned long long)iblock, max_blocks);
3834 +
3835 + eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3836 + inode->i_sb->s_blocksize_bits;
3837 + if (eof_block < iblock + max_blocks)
3838 + eof_block = iblock + max_blocks;
3839
3840 - ext_debug("ext4_split_unwritten_extents: inode %lu,"
3841 - "iblock %llu, max_blocks %u\n", inode->i_ino,
3842 - (unsigned long long)iblock, max_blocks);
3843 depth = ext_depth(inode);
3844 eh = path[depth].p_hdr;
3845 ex = path[depth].p_ext;
3846 @@ -2839,12 +2883,19 @@ static int ext4_split_unwritten_extents(handle_t *handle,
3847 ee_len = ext4_ext_get_actual_len(ex);
3848 allocated = ee_len - (iblock - ee_block);
3849 newblock = iblock - ee_block + ext_pblock(ex);
3850 +
3851 ex2 = ex;
3852 orig_ex.ee_block = ex->ee_block;
3853 orig_ex.ee_len = cpu_to_le16(ee_len);
3854 ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
3855
3856 /*
3857 + * It is safe to convert extent to initialized via explicit
3858 + * zeroout only if extent is fully insde i_size or new_size.
3859 + */
3860 + may_zeroout = ee_block + ee_len <= eof_block;
3861 +
3862 + /*
3863 * If the uninitialized extent begins at the same logical
3864 * block where the write begins, and the write completely
3865 * covers the extent, then we don't need to split it.
3866 @@ -2878,7 +2929,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
3867 ex3->ee_len = cpu_to_le16(allocated - max_blocks);
3868 ext4_ext_mark_uninitialized(ex3);
3869 err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
3870 - if (err == -ENOSPC) {
3871 + if (err == -ENOSPC && may_zeroout) {
3872 err = ext4_ext_zeroout(inode, &orig_ex);
3873 if (err)
3874 goto fix_extent_len;
3875 @@ -2902,8 +2953,10 @@ static int ext4_split_unwritten_extents(handle_t *handle,
3876 * update the extent length after successful insert of the
3877 * split extent
3878 */
3879 - orig_ex.ee_len = cpu_to_le16(ee_len -
3880 - ext4_ext_get_actual_len(ex3));
3881 + ee_len -= ext4_ext_get_actual_len(ex3);
3882 + orig_ex.ee_len = cpu_to_le16(ee_len);
3883 + may_zeroout = ee_block + ee_len <= eof_block;
3884 +
3885 depth = newdepth;
3886 ext4_ext_drop_refs(path);
3887 path = ext4_ext_find_extent(inode, iblock, path);
3888 @@ -2949,7 +3002,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
3889 goto out;
3890 insert:
3891 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3892 - if (err == -ENOSPC) {
3893 + if (err == -ENOSPC && may_zeroout) {
3894 err = ext4_ext_zeroout(inode, &orig_ex);
3895 if (err)
3896 goto fix_extent_len;
3897 @@ -3029,6 +3082,14 @@ out:
3898 return err;
3899 }
3900
3901 +static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3902 + sector_t block, int count)
3903 +{
3904 + int i;
3905 + for (i = 0; i < count; i++)
3906 + unmap_underlying_metadata(bdev, block + i);
3907 +}
3908 +
3909 static int
3910 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3911 ext4_lblk_t iblock, unsigned int max_blocks,
3912 @@ -3059,7 +3120,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3913 if (io)
3914 io->flag = DIO_AIO_UNWRITTEN;
3915 else
3916 - EXT4_I(inode)->i_state |= EXT4_STATE_DIO_UNWRITTEN;
3917 + ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3918 goto out;
3919 }
3920 /* async DIO end_io complete, convert the filled extent to written */
3921 @@ -3104,6 +3165,30 @@ out:
3922 } else
3923 allocated = ret;
3924 set_buffer_new(bh_result);
3925 + /*
3926 + * if we allocated more blocks than requested
3927 + * we need to make sure we unmap the extra block
3928 + * allocated. The actual needed block will get
3929 + * unmapped later when we find the buffer_head marked
3930 + * new.
3931 + */
3932 + if (allocated > max_blocks) {
3933 + unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3934 + newblock + max_blocks,
3935 + allocated - max_blocks);
3936 + allocated = max_blocks;
3937 + }
3938 +
3939 + /*
3940 + * If we have done fallocate with the offset that is already
3941 + * delayed allocated, we would have block reservation
3942 + * and quota reservation done in the delayed write path.
3943 + * But fallocate would have already updated quota and block
3944 + * count for this offset. So cancel these reservation
3945 + */
3946 + if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
3947 + ext4_da_update_reserve_space(inode, allocated, 0);
3948 +
3949 map_out:
3950 set_buffer_mapped(bh_result);
3951 out1:
3952 @@ -3144,9 +3229,9 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3953 {
3954 struct ext4_ext_path *path = NULL;
3955 struct ext4_extent_header *eh;
3956 - struct ext4_extent newex, *ex;
3957 + struct ext4_extent newex, *ex, *last_ex;
3958 ext4_fsblk_t newblock;
3959 - int err = 0, depth, ret, cache_type;
3960 + int i, err = 0, depth, ret, cache_type;
3961 unsigned int allocated = 0;
3962 struct ext4_allocation_request ar;
3963 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3964 @@ -3196,7 +3281,13 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3965 * this situation is possible, though, _during_ tree modification;
3966 * this is why assert can't be put in ext4_ext_find_extent()
3967 */
3968 - BUG_ON(path[depth].p_ext == NULL && depth != 0);
3969 + if (path[depth].p_ext == NULL && depth != 0) {
3970 + ext4_error(inode->i_sb, __func__, "bad extent address "
3971 + "inode: %lu, iblock: %lu, depth: %d",
3972 + inode->i_ino, (unsigned long) iblock, depth);
3973 + err = -EIO;
3974 + goto out2;
3975 + }
3976 eh = path[depth].p_hdr;
3977
3978 ex = path[depth].p_ext;
3979 @@ -3315,10 +3406,36 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3980 if (io)
3981 io->flag = DIO_AIO_UNWRITTEN;
3982 else
3983 - EXT4_I(inode)->i_state |=
3984 - EXT4_STATE_DIO_UNWRITTEN;;
3985 + ext4_set_inode_state(inode,
3986 + EXT4_STATE_DIO_UNWRITTEN);
3987 }
3988 }
3989 +
3990 + if (unlikely(ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))) {
3991 + if (unlikely(!eh->eh_entries)) {
3992 + ext4_error(inode->i_sb, __func__,
3993 + "inode#%lu, eh->eh_entries = 0 and "
3994 + "EOFBLOCKS_FL set", inode->i_ino);
3995 + err = -EIO;
3996 + goto out2;
3997 + }
3998 + last_ex = EXT_LAST_EXTENT(eh);
3999 + /*
4000 + * If the current leaf block was reached by looking at
4001 + * the last index block all the way down the tree, and
4002 + * we are extending the inode beyond the last extent
4003 + * in the current leaf block, then clear the
4004 + * EOFBLOCKS_FL flag.
4005 + */
4006 + for (i = depth-1; i >= 0; i--) {
4007 + if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
4008 + break;
4009 + }
4010 + if ((i < 0) &&
4011 + (iblock + ar.len > le32_to_cpu(last_ex->ee_block) +
4012 + ext4_ext_get_actual_len(last_ex)))
4013 + ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4014 + }
4015 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
4016 if (err) {
4017 /* free data blocks we just allocated */
4018 @@ -3333,9 +3450,18 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
4019 /* previous routine could use block we allocated */
4020 newblock = ext_pblock(&newex);
4021 allocated = ext4_ext_get_actual_len(&newex);
4022 + if (allocated > max_blocks)
4023 + allocated = max_blocks;
4024 set_buffer_new(bh_result);
4025
4026 /*
4027 + * Update reserved blocks/metadata blocks after successful
4028 + * block allocation which had been deferred till now.
4029 + */
4030 + if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4031 + ext4_da_update_reserve_space(inode, allocated, 1);
4032 +
4033 + /*
4034 * Cache the extent and update transaction to commit on fdatasync only
4035 * when it is _not_ an uninitialized extent.
4036 */
4037 @@ -3443,6 +3569,13 @@ static void ext4_falloc_update_inode(struct inode *inode,
4038 i_size_write(inode, new_size);
4039 if (new_size > EXT4_I(inode)->i_disksize)
4040 ext4_update_i_disksize(inode, new_size);
4041 + } else {
4042 + /*
4043 + * Mark that we allocate beyond EOF so the subsequent truncate
4044 + * can proceed even if the new size is the same as i_size.
4045 + */
4046 + if (new_size > i_size_read(inode))
4047 + ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4048 }
4049
4050 }
4051 @@ -3470,7 +3603,7 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
4052 * currently supporting (pre)allocate mode for extent-based
4053 * files _only_
4054 */
4055 - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
4056 + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4057 return -EOPNOTSUPP;
4058
4059 /* preallocation to directories is currently not supported */
4060 @@ -3489,6 +3622,11 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
4061 */
4062 credits = ext4_chunk_trans_blocks(inode, max_blocks);
4063 mutex_lock(&inode->i_mutex);
4064 + ret = inode_newsize_ok(inode, (len + offset));
4065 + if (ret) {
4066 + mutex_unlock(&inode->i_mutex);
4067 + return ret;
4068 + }
4069 retry:
4070 while (ret >= 0 && ret < max_blocks) {
4071 block = block + ret;
4072 @@ -3683,7 +3821,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
4073 int error = 0;
4074
4075 /* in-inode? */
4076 - if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) {
4077 + if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4078 struct ext4_iloc iloc;
4079 int offset; /* offset of xattr in inode */
4080
4081 @@ -3696,6 +3834,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
4082 physical += offset;
4083 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4084 flags |= FIEMAP_EXTENT_DATA_INLINE;
4085 + brelse(iloc.bh);
4086 } else { /* external block */
4087 physical = EXT4_I(inode)->i_file_acl << blockbits;
4088 length = inode->i_sb->s_blocksize;
4089 @@ -3714,7 +3853,7 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4090 int error = 0;
4091
4092 /* fallback to generic here if not in extents fmt */
4093 - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
4094 + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4095 return generic_block_fiemap(inode, fieinfo, start, len,
4096 ext4_get_block);
4097
4098 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
4099 index 9630583..2a60541 100644
4100 --- a/fs/ext4/file.c
4101 +++ b/fs/ext4/file.c
4102 @@ -35,9 +35,9 @@
4103 */
4104 static int ext4_release_file(struct inode *inode, struct file *filp)
4105 {
4106 - if (EXT4_I(inode)->i_state & EXT4_STATE_DA_ALLOC_CLOSE) {
4107 + if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
4108 ext4_alloc_da_blocks(inode);
4109 - EXT4_I(inode)->i_state &= ~EXT4_STATE_DA_ALLOC_CLOSE;
4110 + ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4111 }
4112 /* if we are the last writer on the inode, drop the block reservation */
4113 if ((filp->f_mode & FMODE_WRITE) &&
4114 @@ -65,7 +65,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
4115 * is smaller than s_maxbytes, which is for extent-mapped files.
4116 */
4117
4118 - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
4119 + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4120 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4121 size_t length = iov_length(iov, nr_segs);
4122
4123 diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
4124 index d6049e4..c3660a6 100644
4125 --- a/fs/ext4/fsync.c
4126 +++ b/fs/ext4/fsync.c
4127 @@ -35,6 +35,29 @@
4128 #include <trace/events/ext4.h>
4129
4130 /*
4131 + * If we're not journaling and this is a just-created file, we have to
4132 + * sync our parent directory (if it was freshly created) since
4133 + * otherwise it will only be written by writeback, leaving a huge
4134 + * window during which a crash may lose the file. This may apply for
4135 + * the parent directory's parent as well, and so on recursively, if
4136 + * they are also freshly created.
4137 + */
4138 +static void ext4_sync_parent(struct inode *inode)
4139 +{
4140 + struct dentry *dentry = NULL;
4141 +
4142 + while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
4143 + ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
4144 + dentry = list_entry(inode->i_dentry.next,
4145 + struct dentry, d_alias);
4146 + if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
4147 + break;
4148 + inode = dentry->d_parent->d_inode;
4149 + sync_mapping_buffers(inode->i_mapping);
4150 + }
4151 +}
4152 +
4153 +/*
4154 * akpm: A new design for ext4_sync_file().
4155 *
4156 * This is only called from sys_fsync(), sys_fdatasync() and sys_msync().
4157 @@ -67,8 +90,12 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
4158 if (ret < 0)
4159 return ret;
4160
4161 - if (!journal)
4162 - return simple_fsync(file, dentry, datasync);
4163 + if (!journal) {
4164 + ret = simple_fsync(file, dentry, datasync);
4165 + if (!ret && !list_empty(&inode->i_dentry))
4166 + ext4_sync_parent(inode);
4167 + return ret;
4168 + }
4169
4170 /*
4171 * data=writeback,ordered:
4172 @@ -88,9 +115,21 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
4173 return ext4_force_commit(inode->i_sb);
4174
4175 commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
4176 - if (jbd2_log_start_commit(journal, commit_tid))
4177 - jbd2_log_wait_commit(journal, commit_tid);
4178 - else if (journal->j_flags & JBD2_BARRIER)
4179 + if (jbd2_log_start_commit(journal, commit_tid)) {
4180 + /*
4181 + * When the journal is on a different device than the
4182 + * fs data disk, we need to issue the barrier in
4183 + * writeback mode. (In ordered mode, the jbd2 layer
4184 + * will take care of issuing the barrier. In
4185 + * data=journal, all of the data blocks are written to
4186 + * the journal device.)
4187 + */
4188 + if (ext4_should_writeback_data(inode) &&
4189 + (journal->j_fs_dev != journal->j_dev) &&
4190 + (journal->j_flags & JBD2_BARRIER))
4191 + blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
4192 + ret = jbd2_log_wait_commit(journal, commit_tid);
4193 + } else if (journal->j_flags & JBD2_BARRIER)
4194 blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
4195 return ret;
4196 }
4197 diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
4198 index f3624ea..55a93f5 100644
4199 --- a/fs/ext4/ialloc.c
4200 +++ b/fs/ext4/ialloc.c
4201 @@ -244,57 +244,50 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
4202 if (fatal)
4203 goto error_return;
4204
4205 - /* Ok, now we can actually update the inode bitmaps.. */
4206 - cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
4207 - bit, bitmap_bh->b_data);
4208 - if (!cleared)
4209 - ext4_error(sb, "ext4_free_inode",
4210 - "bit already cleared for inode %lu", ino);
4211 - else {
4212 - gdp = ext4_get_group_desc(sb, block_group, &bh2);
4213 -
4214 + fatal = -ESRCH;
4215 + gdp = ext4_get_group_desc(sb, block_group, &bh2);
4216 + if (gdp) {
4217 BUFFER_TRACE(bh2, "get_write_access");
4218 fatal = ext4_journal_get_write_access(handle, bh2);
4219 - if (fatal) goto error_return;
4220 -
4221 - if (gdp) {
4222 - ext4_lock_group(sb, block_group);
4223 - count = ext4_free_inodes_count(sb, gdp) + 1;
4224 - ext4_free_inodes_set(sb, gdp, count);
4225 - if (is_directory) {
4226 - count = ext4_used_dirs_count(sb, gdp) - 1;
4227 - ext4_used_dirs_set(sb, gdp, count);
4228 - if (sbi->s_log_groups_per_flex) {
4229 - ext4_group_t f;
4230 -
4231 - f = ext4_flex_group(sbi, block_group);
4232 - atomic_dec(&sbi->s_flex_groups[f].free_inodes);
4233 - }
4234 + }
4235 + ext4_lock_group(sb, block_group);
4236 + cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
4237 + if (fatal || !cleared) {
4238 + ext4_unlock_group(sb, block_group);
4239 + goto out;
4240 + }
4241
4242 - }
4243 - gdp->bg_checksum = ext4_group_desc_csum(sbi,
4244 - block_group, gdp);
4245 - ext4_unlock_group(sb, block_group);
4246 - percpu_counter_inc(&sbi->s_freeinodes_counter);
4247 - if (is_directory)
4248 - percpu_counter_dec(&sbi->s_dirs_counter);
4249 -
4250 - if (sbi->s_log_groups_per_flex) {
4251 - ext4_group_t f;
4252 -
4253 - f = ext4_flex_group(sbi, block_group);
4254 - atomic_inc(&sbi->s_flex_groups[f].free_inodes);
4255 - }
4256 - }
4257 - BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
4258 - err = ext4_handle_dirty_metadata(handle, NULL, bh2);
4259 - if (!fatal) fatal = err;
4260 + count = ext4_free_inodes_count(sb, gdp) + 1;
4261 + ext4_free_inodes_set(sb, gdp, count);
4262 + if (is_directory) {
4263 + count = ext4_used_dirs_count(sb, gdp) - 1;
4264 + ext4_used_dirs_set(sb, gdp, count);
4265 + percpu_counter_dec(&sbi->s_dirs_counter);
4266 }
4267 - BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
4268 - err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4269 - if (!fatal)
4270 - fatal = err;
4271 - sb->s_dirt = 1;
4272 + gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
4273 + ext4_unlock_group(sb, block_group);
4274 +
4275 + percpu_counter_inc(&sbi->s_freeinodes_counter);
4276 + if (sbi->s_log_groups_per_flex) {
4277 + ext4_group_t f = ext4_flex_group(sbi, block_group);
4278 +
4279 + atomic_inc(&sbi->s_flex_groups[f].free_inodes);
4280 + if (is_directory)
4281 + atomic_dec(&sbi->s_flex_groups[f].used_dirs);
4282 + }
4283 + BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
4284 + fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
4285 +out:
4286 + if (cleared) {
4287 + BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
4288 + err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4289 + if (!fatal)
4290 + fatal = err;
4291 + sb->s_dirt = 1;
4292 + } else
4293 + ext4_error(sb, "ext4_free_inode",
4294 + "bit already cleared for inode %lu", ino);
4295 +
4296 error_return:
4297 brelse(bitmap_bh);
4298 ext4_std_error(sb, fatal);
4299 @@ -504,7 +497,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
4300
4301 if (S_ISDIR(mode) &&
4302 ((parent == sb->s_root->d_inode) ||
4303 - (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL))) {
4304 + (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
4305 int best_ndir = inodes_per_group;
4306 int ret = -1;
4307
4308 @@ -779,7 +772,7 @@ static int ext4_claim_inode(struct super_block *sb,
4309 if (sbi->s_log_groups_per_flex) {
4310 ext4_group_t f = ext4_flex_group(sbi, group);
4311
4312 - atomic_inc(&sbi->s_flex_groups[f].free_inodes);
4313 + atomic_inc(&sbi->s_flex_groups[f].used_dirs);
4314 }
4315 }
4316 gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
4317 @@ -904,7 +897,7 @@ repeat_in_this_group:
4318 BUFFER_TRACE(inode_bitmap_bh,
4319 "call ext4_handle_dirty_metadata");
4320 err = ext4_handle_dirty_metadata(handle,
4321 - inode,
4322 + NULL,
4323 inode_bitmap_bh);
4324 if (err)
4325 goto fail;
4326 @@ -1029,7 +1022,8 @@ got:
4327 inode->i_generation = sbi->s_next_generation++;
4328 spin_unlock(&sbi->s_next_gen_lock);
4329
4330 - ei->i_state = EXT4_STATE_NEW;
4331 + ei->i_state_flags = 0;
4332 + ext4_set_inode_state(inode, EXT4_STATE_NEW);
4333
4334 ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
4335
4336 @@ -1050,7 +1044,7 @@ got:
4337 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
4338 /* set extent flag only for directory, file and normal symlink*/
4339 if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
4340 - EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
4341 + ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
4342 ext4_ext_tree_init(handle, inode);
4343 }
4344 }
4345 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4346 index 16efcee..99596fc 100644
4347 --- a/fs/ext4/inode.c
4348 +++ b/fs/ext4/inode.c
4349 @@ -957,7 +957,7 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
4350 int count = 0;
4351 ext4_fsblk_t first_block = 0;
4352
4353 - J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
4354 + J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
4355 J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
4356 depth = ext4_block_to_path(inode, iblock, offsets,
4357 &blocks_to_boundary);
4358 @@ -1051,81 +1051,115 @@ qsize_t *ext4_get_reserved_space(struct inode *inode)
4359 return &EXT4_I(inode)->i_reserved_quota;
4360 }
4361 #endif
4362 +
4363 /*
4364 * Calculate the number of metadata blocks need to reserve
4365 - * to allocate @blocks for non extent file based file
4366 + * to allocate a new block at @lblocks for non extent file based file
4367 */
4368 -static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
4369 +static int ext4_indirect_calc_metadata_amount(struct inode *inode,
4370 + sector_t lblock)
4371 {
4372 - int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
4373 - int ind_blks, dind_blks, tind_blks;
4374 -
4375 - /* number of new indirect blocks needed */
4376 - ind_blks = (blocks + icap - 1) / icap;
4377 + struct ext4_inode_info *ei = EXT4_I(inode);
4378 + sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
4379 + int blk_bits;
4380
4381 - dind_blks = (ind_blks + icap - 1) / icap;
4382 + if (lblock < EXT4_NDIR_BLOCKS)
4383 + return 0;
4384
4385 - tind_blks = 1;
4386 + lblock -= EXT4_NDIR_BLOCKS;
4387
4388 - return ind_blks + dind_blks + tind_blks;
4389 + if (ei->i_da_metadata_calc_len &&
4390 + (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
4391 + ei->i_da_metadata_calc_len++;
4392 + return 0;
4393 + }
4394 + ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
4395 + ei->i_da_metadata_calc_len = 1;
4396 + blk_bits = order_base_2(lblock);
4397 + return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
4398 }
4399
4400 /*
4401 * Calculate the number of metadata blocks need to reserve
4402 - * to allocate given number of blocks
4403 + * to allocate a block located at @lblock
4404 */
4405 -static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
4406 +static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
4407 {
4408 - if (!blocks)
4409 - return 0;
4410 -
4411 - if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
4412 - return ext4_ext_calc_metadata_amount(inode, blocks);
4413 + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4414 + return ext4_ext_calc_metadata_amount(inode, lblock);
4415
4416 - return ext4_indirect_calc_metadata_amount(inode, blocks);
4417 + return ext4_indirect_calc_metadata_amount(inode, lblock);
4418 }
4419
4420 -static void ext4_da_update_reserve_space(struct inode *inode, int used)
4421 +/*
4422 + * Called with i_data_sem down, which is important since we can call
4423 + * ext4_discard_preallocations() from here.
4424 + */
4425 +void ext4_da_update_reserve_space(struct inode *inode,
4426 + int used, int quota_claim)
4427 {
4428 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4429 - int total, mdb, mdb_free;
4430 -
4431 - spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
4432 - /* recalculate the number of metablocks still need to be reserved */
4433 - total = EXT4_I(inode)->i_reserved_data_blocks - used;
4434 - mdb = ext4_calc_metadata_amount(inode, total);
4435 -
4436 - /* figure out how many metablocks to release */
4437 - BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
4438 - mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
4439 -
4440 - if (mdb_free) {
4441 - /* Account for allocated meta_blocks */
4442 - mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
4443 -
4444 - /* update fs dirty blocks counter */
4445 + struct ext4_inode_info *ei = EXT4_I(inode);
4446 + int mdb_free = 0, allocated_meta_blocks = 0;
4447 +
4448 + spin_lock(&ei->i_block_reservation_lock);
4449 + if (unlikely(used > ei->i_reserved_data_blocks)) {
4450 + ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
4451 + "with only %d reserved data blocks\n",
4452 + __func__, inode->i_ino, used,
4453 + ei->i_reserved_data_blocks);
4454 + WARN_ON(1);
4455 + used = ei->i_reserved_data_blocks;
4456 + }
4457 +
4458 + /* Update per-inode reservations */
4459 + ei->i_reserved_data_blocks -= used;
4460 + used += ei->i_allocated_meta_blocks;
4461 + ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
4462 + allocated_meta_blocks = ei->i_allocated_meta_blocks;
4463 + ei->i_allocated_meta_blocks = 0;
4464 + percpu_counter_sub(&sbi->s_dirtyblocks_counter, used);
4465 +
4466 + if (ei->i_reserved_data_blocks == 0) {
4467 + /*
4468 + * We can release all of the reserved metadata blocks
4469 + * only when we have written all of the delayed
4470 + * allocation blocks.
4471 + */
4472 + mdb_free = ei->i_reserved_meta_blocks;
4473 + ei->i_reserved_meta_blocks = 0;
4474 + ei->i_da_metadata_calc_len = 0;
4475 percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
4476 - EXT4_I(inode)->i_allocated_meta_blocks = 0;
4477 - EXT4_I(inode)->i_reserved_meta_blocks = mdb;
4478 }
4479 -
4480 - /* update per-inode reservations */
4481 - BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
4482 - EXT4_I(inode)->i_reserved_data_blocks -= used;
4483 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
4484
4485 - /*
4486 - * free those over-booking quota for metadata blocks
4487 - */
4488 - if (mdb_free)
4489 - vfs_dq_release_reservation_block(inode, mdb_free);
4490 + /* Update quota subsystem */
4491 + if (quota_claim) {
4492 + vfs_dq_claim_block(inode, used);
4493 + if (mdb_free)
4494 + vfs_dq_release_reservation_block(inode, mdb_free);
4495 + } else {
4496 + /*
4497 + * We did fallocate with an offset that is already delayed
4498 + * allocated. So on delayed allocated writeback we should
4499 + * not update the quota for allocated blocks. But then
4500 + * converting an fallocate region to initialized region would
4501 + * have caused a metadata allocation. So claim quota for
4502 + * that
4503 + */
4504 + if (allocated_meta_blocks)
4505 + vfs_dq_claim_block(inode, allocated_meta_blocks);
4506 + vfs_dq_release_reservation_block(inode, mdb_free + used -
4507 + allocated_meta_blocks);
4508 + }
4509
4510 /*
4511 * If we have done all the pending block allocations and if
4512 * there aren't any writers on the inode, we can discard the
4513 * inode's preallocations.
4514 */
4515 - if (!total && (atomic_read(&inode->i_writecount) == 0))
4516 + if ((ei->i_reserved_data_blocks == 0) &&
4517 + (atomic_read(&inode->i_writecount) == 0))
4518 ext4_discard_preallocations(inode);
4519 }
4520
4521 @@ -1240,7 +1274,7 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
4522 * file system block.
4523 */
4524 down_read((&EXT4_I(inode)->i_data_sem));
4525 - if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
4526 + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4527 retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
4528 bh, 0);
4529 } else {
4530 @@ -1302,7 +1336,7 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
4531 * We need to check for EXT4 here because migrate
4532 * could have changed the inode type in between
4533 */
4534 - if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
4535 + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4536 retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
4537 bh, flags);
4538 } else {
4539 @@ -1315,20 +1349,22 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
4540 * i_data's format changing. Force the migrate
4541 * to fail by clearing migrate flags
4542 */
4543 - EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
4544 + ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
4545 }
4546 - }
4547
4548 + /*
4549 + * Update reserved blocks/metadata blocks after successful
4550 + * block allocation which had been deferred till now. We don't
4551 + * support fallocate for non extent files. So we can update
4552 + * reserve space here.
4553 + */
4554 + if ((retval > 0) &&
4555 + (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
4556 + ext4_da_update_reserve_space(inode, retval, 1);
4557 + }
4558 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4559 EXT4_I(inode)->i_delalloc_reserved_flag = 0;
4560
4561 - /*
4562 - * Update reserved blocks/metadata blocks after successful
4563 - * block allocation which had been deferred till now.
4564 - */
4565 - if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE))
4566 - ext4_da_update_reserve_space(inode, retval);
4567 -
4568 up_write((&EXT4_I(inode)->i_data_sem));
4569 if (retval > 0 && buffer_mapped(bh)) {
4570 int ret = check_block_validity(inode, "file system "
4571 @@ -1800,7 +1836,7 @@ static int ext4_journalled_write_end(struct file *file,
4572 new_i_size = pos + copied;
4573 if (new_i_size > inode->i_size)
4574 i_size_write(inode, pos+copied);
4575 - EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
4576 + ext4_set_inode_state(inode, EXT4_STATE_JDATA);
4577 if (new_i_size > EXT4_I(inode)->i_disksize) {
4578 ext4_update_i_disksize(inode, new_i_size);
4579 ret2 = ext4_mark_inode_dirty(handle, inode);
4580 @@ -1834,11 +1870,15 @@ static int ext4_journalled_write_end(struct file *file,
4581 return ret ? ret : copied;
4582 }
4583
4584 -static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
4585 +/*
4586 + * Reserve a single block located at lblock
4587 + */
4588 +static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
4589 {
4590 int retries = 0;
4591 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4592 - unsigned long md_needed, mdblocks, total = 0;
4593 + struct ext4_inode_info *ei = EXT4_I(inode);
4594 + unsigned long md_needed, md_reserved;
4595
4596 /*
4597 * recalculate the amount of metadata blocks to reserve
4598 @@ -1846,35 +1886,31 @@ static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
4599 * worse case is one extent per block
4600 */
4601 repeat:
4602 - spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
4603 - total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks;
4604 - mdblocks = ext4_calc_metadata_amount(inode, total);
4605 - BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks);
4606 -
4607 - md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
4608 - total = md_needed + nrblocks;
4609 - spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
4610 + spin_lock(&ei->i_block_reservation_lock);
4611 + md_reserved = ei->i_reserved_meta_blocks;
4612 + md_needed = ext4_calc_metadata_amount(inode, lblock);
4613 + spin_unlock(&ei->i_block_reservation_lock);
4614
4615 /*
4616 * Make quota reservation here to prevent quota overflow
4617 * later. Real quota accounting is done at pages writeout
4618 * time.
4619 */
4620 - if (vfs_dq_reserve_block(inode, total))
4621 + if (vfs_dq_reserve_block(inode, md_needed + 1))
4622 return -EDQUOT;
4623
4624 - if (ext4_claim_free_blocks(sbi, total)) {
4625 - vfs_dq_release_reservation_block(inode, total);
4626 + if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
4627 + vfs_dq_release_reservation_block(inode, md_needed + 1);
4628 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
4629 yield();
4630 goto repeat;
4631 }
4632 return -ENOSPC;
4633 }
4634 - spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
4635 - EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
4636 - EXT4_I(inode)->i_reserved_meta_blocks += md_needed;
4637 - spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
4638 + spin_lock(&ei->i_block_reservation_lock);
4639 + ei->i_reserved_data_blocks++;
4640 + ei->i_reserved_meta_blocks += md_needed;
4641 + spin_unlock(&ei->i_block_reservation_lock);
4642
4643 return 0; /* success */
4644 }
4645 @@ -1882,49 +1918,46 @@ repeat:
4646 static void ext4_da_release_space(struct inode *inode, int to_free)
4647 {
4648 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4649 - int total, mdb, mdb_free, release;
4650 + struct ext4_inode_info *ei = EXT4_I(inode);
4651
4652 if (!to_free)
4653 return; /* Nothing to release, exit */
4654
4655 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
4656
4657 - if (!EXT4_I(inode)->i_reserved_data_blocks) {
4658 + if (unlikely(to_free > ei->i_reserved_data_blocks)) {
4659 /*
4660 - * if there is no reserved blocks, but we try to free some
4661 - * then the counter is messed up somewhere.
4662 - * but since this function is called from invalidate
4663 - * page, it's harmless to return without any action
4664 + * if there aren't enough reserved blocks, then the
4665 + * counter is messed up somewhere. Since this
4666 + * function is called from invalidate page, it's
4667 + * harmless to return without any action.
4668 */
4669 - printk(KERN_INFO "ext4 delalloc try to release %d reserved "
4670 - "blocks for inode %lu, but there is no reserved "
4671 - "data blocks\n", to_free, inode->i_ino);
4672 - spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
4673 - return;
4674 + ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
4675 + "ino %lu, to_free %d with only %d reserved "
4676 + "data blocks\n", inode->i_ino, to_free,
4677 + ei->i_reserved_data_blocks);
4678 + WARN_ON(1);
4679 + to_free = ei->i_reserved_data_blocks;
4680 }
4681 + ei->i_reserved_data_blocks -= to_free;
4682
4683 - /* recalculate the number of metablocks still need to be reserved */
4684 - total = EXT4_I(inode)->i_reserved_data_blocks - to_free;
4685 - mdb = ext4_calc_metadata_amount(inode, total);
4686 -
4687 - /* figure out how many metablocks to release */
4688 - BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
4689 - mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
4690 -
4691 - release = to_free + mdb_free;
4692 -
4693 - /* update fs dirty blocks counter for truncate case */
4694 - percpu_counter_sub(&sbi->s_dirtyblocks_counter, release);
4695 + if (ei->i_reserved_data_blocks == 0) {
4696 + /*
4697 + * We can release all of the reserved metadata blocks
4698 + * only when we have written all of the delayed
4699 + * allocation blocks.
4700 + */
4701 + to_free += ei->i_reserved_meta_blocks;
4702 + ei->i_reserved_meta_blocks = 0;
4703 + ei->i_da_metadata_calc_len = 0;
4704 + }
4705
4706 - /* update per-inode reservations */
4707 - BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks);
4708 - EXT4_I(inode)->i_reserved_data_blocks -= to_free;
4709 + /* update fs dirty blocks counter */
4710 + percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free);
4711
4712 - BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
4713 - EXT4_I(inode)->i_reserved_meta_blocks = mdb;
4714 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
4715
4716 - vfs_dq_release_reservation_block(inode, release);
4717 + vfs_dq_release_reservation_block(inode, to_free);
4718 }
4719
4720 static void ext4_da_page_release_reservation(struct page *page,
4721 @@ -2229,10 +2262,10 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
4722 * variables are updated after the blocks have been allocated.
4723 */
4724 new.b_state = 0;
4725 - get_blocks_flags = (EXT4_GET_BLOCKS_CREATE |
4726 - EXT4_GET_BLOCKS_DELALLOC_RESERVE);
4727 + get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
4728 if (mpd->b_state & (1 << BH_Delay))
4729 - get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE;
4730 + get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
4731 +
4732 blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks,
4733 &new, get_blocks_flags);
4734 if (blks < 0) {
4735 @@ -2261,7 +2294,7 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
4736 ext4_msg(mpd->inode->i_sb, KERN_CRIT,
4737 "delayed block allocation failed for inode %lu at "
4738 "logical offset %llu with max blocks %zd with "
4739 - "error %d\n", mpd->inode->i_ino,
4740 + "error %d", mpd->inode->i_ino,
4741 (unsigned long long) next,
4742 mpd->b_size >> mpd->inode->i_blkbits, err);
4743 printk(KERN_CRIT "This should not happen!! "
4744 @@ -2328,8 +2361,17 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
4745 sector_t next;
4746 int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
4747
4748 + /*
4749 + * XXX Don't go larger than mballoc is willing to allocate
4750 + * This is a stopgap solution. We eventually need to fold
4751 + * mpage_da_submit_io() into this function and then call
4752 + * ext4_get_blocks() multiple times in a loop
4753 + */
4754 + if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
4755 + goto flush_it;
4756 +
4757 /* check if thereserved journal credits might overflow */
4758 - if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
4759 + if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
4760 if (nrblocks >= EXT4_MAX_TRANS_DATA) {
4761 /*
4762 * With non-extent format we are limited by the journal
4763 @@ -2530,7 +2572,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
4764 * XXX: __block_prepare_write() unmaps passed block,
4765 * is it OK?
4766 */
4767 - ret = ext4_da_reserve_space(inode, 1);
4768 + ret = ext4_da_reserve_space(inode, iblock);
4769 if (ret)
4770 /* not enough space to reserve */
4771 return ret;
4772 @@ -2641,7 +2683,7 @@ static int __ext4_journalled_writepage(struct page *page,
4773 ret = err;
4774
4775 walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
4776 - EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
4777 + ext4_set_inode_state(inode, EXT4_STATE_JDATA);
4778 out:
4779 return ret;
4780 }
4781 @@ -2794,7 +2836,7 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
4782 * number of contiguous block. So we will limit
4783 * number of contiguous block to a sane value
4784 */
4785 - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) &&
4786 + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
4787 (max_blocks > EXT4_MAX_TRANS_DATA))
4788 max_blocks = EXT4_MAX_TRANS_DATA;
4789
4790 @@ -2914,7 +2956,7 @@ retry:
4791 if (IS_ERR(handle)) {
4792 ret = PTR_ERR(handle);
4793 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
4794 - "%ld pages, ino %lu; err %d\n", __func__,
4795 + "%ld pages, ino %lu; err %d", __func__,
4796 wbc->nr_to_write, inode->i_ino, ret);
4797 goto out_writepages;
4798 }
4799 @@ -2989,7 +3031,7 @@ retry:
4800 if (pages_skipped != wbc->pages_skipped)
4801 ext4_msg(inode->i_sb, KERN_CRIT,
4802 "This should not happen leaving %s "
4803 - "with nr_to_write = %ld ret = %d\n",
4804 + "with nr_to_write = %ld ret = %d",
4805 __func__, wbc->nr_to_write, ret);
4806
4807 /* Update index */
4808 @@ -3005,8 +3047,7 @@ retry:
4809 out_writepages:
4810 if (!no_nrwrite_index_update)
4811 wbc->no_nrwrite_index_update = 0;
4812 - if (wbc->nr_to_write > nr_to_writebump)
4813 - wbc->nr_to_write -= nr_to_writebump;
4814 + wbc->nr_to_write -= nr_to_writebump;
4815 wbc->range_start = range_start;
4816 trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
4817 return ret;
4818 @@ -3050,7 +3091,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
4819 loff_t pos, unsigned len, unsigned flags,
4820 struct page **pagep, void **fsdata)
4821 {
4822 - int ret, retries = 0;
4823 + int ret, retries = 0, quota_retries = 0;
4824 struct page *page;
4825 pgoff_t index;
4826 unsigned from, to;
4827 @@ -3109,6 +3150,22 @@ retry:
4828
4829 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
4830 goto retry;
4831 +
4832 + if ((ret == -EDQUOT) &&
4833 + EXT4_I(inode)->i_reserved_meta_blocks &&
4834 + (quota_retries++ < 3)) {
4835 + /*
4836 + * Since we often over-estimate the number of meta
4837 + * data blocks required, we may sometimes get a
4838 + * spurious out of quota error even though there would
4839 + * be enough space once we write the data blocks and
4840 + * find out how many meta data blocks were _really_
4841 + * required. So try forcing the inode write to see if
4842 + * that helps.
4843 + */
4844 + write_inode_now(inode, (quota_retries == 3));
4845 + goto retry;
4846 + }
4847 out:
4848 return ret;
4849 }
4850 @@ -3297,7 +3354,8 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
4851 filemap_write_and_wait(mapping);
4852 }
4853
4854 - if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
4855 + if (EXT4_JOURNAL(inode) &&
4856 + ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
4857 /*
4858 * This is a REALLY heavyweight approach, but the use of
4859 * bmap on dirty files is expected to be extremely rare:
4860 @@ -3316,7 +3374,7 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
4861 * everything they get.
4862 */
4863
4864 - EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
4865 + ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
4866 journal = EXT4_JOURNAL(inode);
4867 jbd2_journal_lock_updates(journal);
4868 err = jbd2_journal_flush(journal);
4869 @@ -3432,6 +3490,9 @@ retry:
4870 * but cannot extend i_size. Bail out and pretend
4871 * the write failed... */
4872 ret = PTR_ERR(handle);
4873 + if (inode->i_nlink)
4874 + ext4_orphan_del(NULL, inode);
4875 +
4876 goto out;
4877 }
4878 if (inode->i_nlink)
4879 @@ -3784,8 +3845,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
4880 if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
4881 ext4_free_io_end(iocb->private);
4882 iocb->private = NULL;
4883 - } else if (ret > 0 && (EXT4_I(inode)->i_state &
4884 - EXT4_STATE_DIO_UNWRITTEN)) {
4885 + } else if (ret > 0 && ext4_test_inode_state(inode,
4886 + EXT4_STATE_DIO_UNWRITTEN)) {
4887 int err;
4888 /*
4889 * for non AIO case, since the IO is already
4890 @@ -3795,7 +3856,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
4891 offset, ret);
4892 if (err < 0)
4893 ret = err;
4894 - EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN;
4895 + ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
4896 }
4897 return ret;
4898 }
4899 @@ -3811,7 +3872,7 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
4900 struct file *file = iocb->ki_filp;
4901 struct inode *inode = file->f_mapping->host;
4902
4903 - if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
4904 + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4905 return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
4906
4907 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
4908 @@ -4442,10 +4503,12 @@ void ext4_truncate(struct inode *inode)
4909 if (!ext4_can_truncate(inode))
4910 return;
4911
4912 + ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4913 +
4914 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4915 - ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
4916 + ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4917
4918 - if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
4919 + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4920 ext4_ext_truncate(inode);
4921 return;
4922 }
4923 @@ -4729,7 +4792,7 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4924 {
4925 /* We have all inode data except xattrs in memory here. */
4926 return __ext4_get_inode_loc(inode, iloc,
4927 - !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
4928 + !ext4_test_inode_state(inode, EXT4_STATE_XATTR));
4929 }
4930
4931 void ext4_set_inode_flags(struct inode *inode)
4932 @@ -4823,7 +4886,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4933 }
4934 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
4935
4936 - ei->i_state = 0;
4937 + ei->i_state_flags = 0;
4938 ei->i_dir_start_lookup = 0;
4939 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4940 /* We now have enough fields to check if the inode was active or not.
4941 @@ -4906,7 +4969,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4942 EXT4_GOOD_OLD_INODE_SIZE +
4943 ei->i_extra_isize;
4944 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
4945 - ei->i_state |= EXT4_STATE_XATTR;
4946 + ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4947 }
4948 } else
4949 ei->i_extra_isize = 0;
4950 @@ -5046,7 +5109,7 @@ static int ext4_do_update_inode(handle_t *handle,
4951
4952 /* For fields not not tracking in the in-memory inode,
4953 * initialise them to zero for new inodes. */
4954 - if (ei->i_state & EXT4_STATE_NEW)
4955 + if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
4956 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
4957
4958 ext4_get_inode_flags(ei);
4959 @@ -5110,7 +5173,7 @@ static int ext4_do_update_inode(handle_t *handle,
4960 EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
4961 sb->s_dirt = 1;
4962 ext4_handle_sync(handle);
4963 - err = ext4_handle_dirty_metadata(handle, inode,
4964 + err = ext4_handle_dirty_metadata(handle, NULL,
4965 EXT4_SB(sb)->s_sbh);
4966 }
4967 }
4968 @@ -5139,10 +5202,10 @@ static int ext4_do_update_inode(handle_t *handle,
4969 }
4970
4971 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4972 - rc = ext4_handle_dirty_metadata(handle, inode, bh);
4973 + rc = ext4_handle_dirty_metadata(handle, NULL, bh);
4974 if (!err)
4975 err = rc;
4976 - ei->i_state &= ~EXT4_STATE_NEW;
4977 + ext4_clear_inode_state(inode, EXT4_STATE_NEW);
4978
4979 ext4_update_inode_fsync_trans(handle, inode, 0);
4980 out_brelse:
4981 @@ -5207,7 +5270,7 @@ int ext4_write_inode(struct inode *inode, int wait)
4982 } else {
4983 struct ext4_iloc iloc;
4984
4985 - err = ext4_get_inode_loc(inode, &iloc);
4986 + err = __ext4_get_inode_loc(inode, &iloc, 0);
4987 if (err)
4988 return err;
4989 if (wait)
4990 @@ -5220,6 +5283,7 @@ int ext4_write_inode(struct inode *inode, int wait)
4991 (unsigned long long)iloc.bh->b_blocknr);
4992 err = -EIO;
4993 }
4994 + brelse(iloc.bh);
4995 }
4996 return err;
4997 }
4998 @@ -5286,7 +5350,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4999 }
5000
5001 if (attr->ia_valid & ATTR_SIZE) {
5002 - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
5003 + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5004 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5005
5006 if (attr->ia_size > sbi->s_bitmap_maxbytes) {
5007 @@ -5297,7 +5361,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5008 }
5009
5010 if (S_ISREG(inode->i_mode) &&
5011 - attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
5012 + attr->ia_valid & ATTR_SIZE &&
5013 + (attr->ia_size < inode->i_size ||
5014 + (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))) {
5015 handle_t *handle;
5016
5017 handle = ext4_journal_start(inode, 3);
5018 @@ -5328,6 +5394,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5019 goto err_out;
5020 }
5021 }
5022 + /* ext4_truncate will clear the flag */
5023 + if ((ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))
5024 + ext4_truncate(inode);
5025 }
5026
5027 rc = inode_setattr(inode, attr);
5028 @@ -5402,7 +5471,7 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
5029
5030 static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
5031 {
5032 - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
5033 + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5034 return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
5035 return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
5036 }
5037 @@ -5566,8 +5635,8 @@ static int ext4_expand_extra_isize(struct inode *inode,
5038 entry = IFIRST(header);
5039
5040 /* No extended attributes present */
5041 - if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
5042 - header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5043 + if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5044 + header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5045 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
5046 new_extra_isize);
5047 EXT4_I(inode)->i_extra_isize = new_extra_isize;
5048 @@ -5611,7 +5680,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5049 err = ext4_reserve_inode_write(handle, inode, &iloc);
5050 if (ext4_handle_valid(handle) &&
5051 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
5052 - !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
5053 + !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5054 /*
5055 * We need extra buffer credits since we may write into EA block
5056 * with this same handle. If journal_extend fails, then it will
5057 @@ -5625,7 +5694,8 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5058 sbi->s_want_extra_isize,
5059 iloc, handle);
5060 if (ret) {
5061 - EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
5062 + ext4_set_inode_state(inode,
5063 + EXT4_STATE_NO_EXPAND);
5064 if (mnt_count !=
5065 le16_to_cpu(sbi->s_es->s_mnt_count)) {
5066 ext4_warning(inode->i_sb, __func__,
5067 @@ -5692,7 +5762,7 @@ static int ext4_pin_inode(handle_t *handle, struct inode *inode)
5068 err = jbd2_journal_get_write_access(handle, iloc.bh);
5069 if (!err)
5070 err = ext4_handle_dirty_metadata(handle,
5071 - inode,
5072 + NULL,
5073 iloc.bh);
5074 brelse(iloc.bh);
5075 }
5076 @@ -5736,9 +5806,9 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
5077 */
5078
5079 if (val)
5080 - EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
5081 + ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5082 else
5083 - EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
5084 + ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5085 ext4_set_aops(inode);
5086
5087 jbd2_journal_unlock_updates(journal);
5088 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
5089 index b63d193..bf5ae88 100644
5090 --- a/fs/ext4/ioctl.c
5091 +++ b/fs/ext4/ioctl.c
5092 @@ -92,6 +92,15 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5093 flags &= ~EXT4_EXTENTS_FL;
5094 }
5095
5096 + if (flags & EXT4_EOFBLOCKS_FL) {
5097 + /* we don't support adding EOFBLOCKS flag */
5098 + if (!(oldflags & EXT4_EOFBLOCKS_FL)) {
5099 + err = -EOPNOTSUPP;
5100 + goto flags_out;
5101 + }
5102 + } else if (oldflags & EXT4_EOFBLOCKS_FL)
5103 + ext4_truncate(inode);
5104 +
5105 handle = ext4_journal_start(inode, 1);
5106 if (IS_ERR(handle)) {
5107 err = PTR_ERR(handle);
5108 @@ -249,7 +258,8 @@ setversion_out:
5109 if (me.moved_len > 0)
5110 file_remove_suid(donor_filp);
5111
5112 - if (copy_to_user((struct move_extent *)arg, &me, sizeof(me)))
5113 + if (copy_to_user((struct move_extent __user *)arg,
5114 + &me, sizeof(me)))
5115 err = -EFAULT;
5116 mext_out:
5117 fput(donor_filp);
5118 @@ -363,7 +373,30 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
5119 case EXT4_IOC32_SETRSVSZ:
5120 cmd = EXT4_IOC_SETRSVSZ;
5121 break;
5122 - case EXT4_IOC_GROUP_ADD:
5123 + case EXT4_IOC32_GROUP_ADD: {
5124 + struct compat_ext4_new_group_input __user *uinput;
5125 + struct ext4_new_group_input input;
5126 + mm_segment_t old_fs;
5127 + int err;
5128 +
5129 + uinput = compat_ptr(arg);
5130 + err = get_user(input.group, &uinput->group);
5131 + err |= get_user(input.block_bitmap, &uinput->block_bitmap);
5132 + err |= get_user(input.inode_bitmap, &uinput->inode_bitmap);
5133 + err |= get_user(input.inode_table, &uinput->inode_table);
5134 + err |= get_user(input.blocks_count, &uinput->blocks_count);
5135 + err |= get_user(input.reserved_blocks,
5136 + &uinput->reserved_blocks);
5137 + if (err)
5138 + return -EFAULT;
5139 + old_fs = get_fs();
5140 + set_fs(KERNEL_DS);
5141 + err = ext4_ioctl(file, EXT4_IOC_GROUP_ADD,
5142 + (unsigned long) &input);
5143 + set_fs(old_fs);
5144 + return err;
5145 + }
5146 + case EXT4_IOC_MOVE_EXT:
5147 break;
5148 default:
5149 return -ENOIOCTLCMD;
5150 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
5151 index 7d71148..04e07e2 100644
5152 --- a/fs/ext4/mballoc.c
5153 +++ b/fs/ext4/mballoc.c
5154 @@ -658,6 +658,27 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
5155 }
5156 }
5157
5158 +/*
5159 + * Cache the order of the largest free extent we have available in this block
5160 + * group.
5161 + */
5162 +static void
5163 +mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
5164 +{
5165 + int i;
5166 + int bits;
5167 +
5168 + grp->bb_largest_free_order = -1; /* uninit */
5169 +
5170 + bits = sb->s_blocksize_bits + 1;
5171 + for (i = bits; i >= 0; i--) {
5172 + if (grp->bb_counters[i] > 0) {
5173 + grp->bb_largest_free_order = i;
5174 + break;
5175 + }
5176 + }
5177 +}
5178 +
5179 static noinline_for_stack
5180 void ext4_mb_generate_buddy(struct super_block *sb,
5181 void *buddy, void *bitmap, ext4_group_t group)
5182 @@ -700,6 +721,7 @@ void ext4_mb_generate_buddy(struct super_block *sb,
5183 */
5184 grp->bb_free = free;
5185 }
5186 + mb_set_largest_free_order(sb, grp);
5187
5188 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
5189
5190 @@ -725,6 +747,9 @@ void ext4_mb_generate_buddy(struct super_block *sb,
5191 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
5192 * So it can have information regarding groups_per_page which
5193 * is blocks_per_page/2
5194 + *
5195 + * Locking note: This routine takes the block group lock of all groups
5196 + * for this page; do not hold this lock when calling this routine!
5197 */
5198
5199 static int ext4_mb_init_cache(struct page *page, char *incore)
5200 @@ -910,6 +935,11 @@ out:
5201 return err;
5202 }
5203
5204 +/*
5205 + * Locking note: This routine calls ext4_mb_init_cache(), which takes the
5206 + * block group lock of all groups for this page; do not hold the BG lock when
5207 + * calling this routine!
5208 + */
5209 static noinline_for_stack
5210 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
5211 {
5212 @@ -1004,6 +1034,11 @@ err:
5213 return ret;
5214 }
5215
5216 +/*
5217 + * Locking note: This routine calls ext4_mb_init_cache(), which takes the
5218 + * block group lock of all groups for this page; do not hold the BG lock when
5219 + * calling this routine!
5220 + */
5221 static noinline_for_stack int
5222 ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
5223 struct ext4_buddy *e4b)
5224 @@ -1150,7 +1185,7 @@ err:
5225 return ret;
5226 }
5227
5228 -static void ext4_mb_release_desc(struct ext4_buddy *e4b)
5229 +static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
5230 {
5231 if (e4b->bd_bitmap_page)
5232 page_cache_release(e4b->bd_bitmap_page);
5233 @@ -1300,6 +1335,7 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
5234 buddy = buddy2;
5235 } while (1);
5236 }
5237 + mb_set_largest_free_order(sb, e4b->bd_info);
5238 mb_check_buddy(e4b);
5239 }
5240
5241 @@ -1428,6 +1464,7 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
5242 e4b->bd_info->bb_counters[ord]++;
5243 e4b->bd_info->bb_counters[ord]++;
5244 }
5245 + mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
5246
5247 mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
5248 mb_check_buddy(e4b);
5249 @@ -1618,7 +1655,7 @@ int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
5250 }
5251
5252 ext4_unlock_group(ac->ac_sb, group);
5253 - ext4_mb_release_desc(e4b);
5254 + ext4_mb_unload_buddy(e4b);
5255
5256 return 0;
5257 }
5258 @@ -1674,7 +1711,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
5259 ext4_mb_use_best_found(ac, e4b);
5260 }
5261 ext4_unlock_group(ac->ac_sb, group);
5262 - ext4_mb_release_desc(e4b);
5263 + ext4_mb_unload_buddy(e4b);
5264
5265 return 0;
5266 }
5267 @@ -1823,16 +1860,22 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
5268 }
5269 }
5270
5271 +/* This is now called BEFORE we load the buddy bitmap. */
5272 static int ext4_mb_good_group(struct ext4_allocation_context *ac,
5273 ext4_group_t group, int cr)
5274 {
5275 unsigned free, fragments;
5276 - unsigned i, bits;
5277 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
5278 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
5279
5280 BUG_ON(cr < 0 || cr >= 4);
5281 - BUG_ON(EXT4_MB_GRP_NEED_INIT(grp));
5282 +
5283 + /* We only do this if the grp has never been initialized */
5284 + if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
5285 + int ret = ext4_mb_init_group(ac->ac_sb, group);
5286 + if (ret)
5287 + return 0;
5288 + }
5289
5290 free = grp->bb_free;
5291 fragments = grp->bb_fragments;
5292 @@ -1845,17 +1888,16 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
5293 case 0:
5294 BUG_ON(ac->ac_2order == 0);
5295
5296 + if (grp->bb_largest_free_order < ac->ac_2order)
5297 + return 0;
5298 +
5299 /* Avoid using the first bg of a flexgroup for data files */
5300 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
5301 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
5302 ((group % flex_size) == 0))
5303 return 0;
5304
5305 - bits = ac->ac_sb->s_blocksize_bits + 1;
5306 - for (i = ac->ac_2order; i <= bits; i++)
5307 - if (grp->bb_counters[i] > 0)
5308 - return 1;
5309 - break;
5310 + return 1;
5311 case 1:
5312 if ((free / fragments) >= ac->ac_g_ex.fe_len)
5313 return 1;
5314 @@ -1966,7 +2008,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
5315 sbi = EXT4_SB(sb);
5316 ngroups = ext4_get_groups_count(sb);
5317 /* non-extent files are limited to low blocks/groups */
5318 - if (!(EXT4_I(ac->ac_inode)->i_flags & EXT4_EXTENTS_FL))
5319 + if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
5320 ngroups = sbi->s_blockfile_groups;
5321
5322 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
5323 @@ -2026,15 +2068,11 @@ repeat:
5324 group = ac->ac_g_ex.fe_group;
5325
5326 for (i = 0; i < ngroups; group++, i++) {
5327 - struct ext4_group_info *grp;
5328 - struct ext4_group_desc *desc;
5329 -
5330 if (group == ngroups)
5331 group = 0;
5332
5333 - /* quick check to skip empty groups */
5334 - grp = ext4_get_group_info(sb, group);
5335 - if (grp->bb_free == 0)
5336 + /* This now checks without needing the buddy page */
5337 + if (!ext4_mb_good_group(ac, group, cr))
5338 continue;
5339
5340 err = ext4_mb_load_buddy(sb, group, &e4b);
5341 @@ -2042,15 +2080,18 @@ repeat:
5342 goto out;
5343
5344 ext4_lock_group(sb, group);
5345 +
5346 + /*
5347 + * We need to check again after locking the
5348 + * block group
5349 + */
5350 if (!ext4_mb_good_group(ac, group, cr)) {
5351 - /* someone did allocation from this group */
5352 ext4_unlock_group(sb, group);
5353 - ext4_mb_release_desc(&e4b);
5354 + ext4_mb_unload_buddy(&e4b);
5355 continue;
5356 }
5357
5358 ac->ac_groups_scanned++;
5359 - desc = ext4_get_group_desc(sb, group, NULL);
5360 if (cr == 0)
5361 ext4_mb_simple_scan_group(ac, &e4b);
5362 else if (cr == 1 &&
5363 @@ -2060,7 +2101,7 @@ repeat:
5364 ext4_mb_complex_scan_group(ac, &e4b);
5365
5366 ext4_unlock_group(sb, group);
5367 - ext4_mb_release_desc(&e4b);
5368 + ext4_mb_unload_buddy(&e4b);
5369
5370 if (ac->ac_status != AC_STATUS_CONTINUE)
5371 break;
5372 @@ -2150,7 +2191,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
5373 ext4_lock_group(sb, group);
5374 memcpy(&sg, ext4_get_group_info(sb, group), i);
5375 ext4_unlock_group(sb, group);
5376 - ext4_mb_release_desc(&e4b);
5377 + ext4_mb_unload_buddy(&e4b);
5378
5379 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
5380 sg.info.bb_fragments, sg.info.bb_first_free);
5381 @@ -2257,6 +2298,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
5382 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
5383 init_rwsem(&meta_group_info[i]->alloc_sem);
5384 meta_group_info[i]->bb_free_root.rb_node = NULL;
5385 + meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
5386
5387 #ifdef DOUBLE_CHECK
5388 {
5389 @@ -2537,6 +2579,23 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
5390 mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
5391 entry->count, entry->group, entry);
5392
5393 + if (test_opt(sb, DISCARD)) {
5394 + int ret;
5395 + ext4_fsblk_t discard_block;
5396 +
5397 + discard_block = entry->start_blk +
5398 + ext4_group_first_block_no(sb, entry->group);
5399 + trace_ext4_discard_blocks(sb,
5400 + (unsigned long long)discard_block,
5401 + entry->count);
5402 + ret = sb_issue_discard(sb, discard_block, entry->count);
5403 + if (ret == EOPNOTSUPP) {
5404 + ext4_warning(sb, __func__,
5405 + "discard not supported, disabling");
5406 + clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
5407 + }
5408 + }
5409 +
5410 err = ext4_mb_load_buddy(sb, entry->group, &e4b);
5411 /* we expect to find existing buddy because it's pinned */
5412 BUG_ON(err != 0);
5413 @@ -2558,21 +2617,8 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
5414 page_cache_release(e4b.bd_bitmap_page);
5415 }
5416 ext4_unlock_group(sb, entry->group);
5417 - if (test_opt(sb, DISCARD)) {
5418 - ext4_fsblk_t discard_block;
5419 - struct ext4_super_block *es = EXT4_SB(sb)->s_es;
5420 -
5421 - discard_block = (ext4_fsblk_t)entry->group *
5422 - EXT4_BLOCKS_PER_GROUP(sb)
5423 - + entry->start_blk
5424 - + le32_to_cpu(es->s_first_data_block);
5425 - trace_ext4_discard_blocks(sb,
5426 - (unsigned long long)discard_block,
5427 - entry->count);
5428 - sb_issue_discard(sb, discard_block, entry->count);
5429 - }
5430 kmem_cache_free(ext4_free_ext_cachep, entry);
5431 - ext4_mb_release_desc(&e4b);
5432 + ext4_mb_unload_buddy(&e4b);
5433 }
5434
5435 mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
5436 @@ -2755,12 +2801,6 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
5437 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
5438 /* release all the reserved blocks if non delalloc */
5439 percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
5440 - else {
5441 - percpu_counter_sub(&sbi->s_dirtyblocks_counter,
5442 - ac->ac_b_ex.fe_len);
5443 - /* convert reserved quota blocks to real quota blocks */
5444 - vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
5445 - }
5446
5447 if (sbi->s_log_groups_per_flex) {
5448 ext4_group_t flex_group = ext4_flex_group(sbi,
5449 @@ -3136,7 +3176,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
5450 continue;
5451
5452 /* non-extent files can't have physical blocks past 2^32 */
5453 - if (!(EXT4_I(ac->ac_inode)->i_flags & EXT4_EXTENTS_FL) &&
5454 + if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
5455 pa->pa_pstart + pa->pa_len > EXT4_MAX_BLOCK_FILE_PHYS)
5456 continue;
5457
5458 @@ -3715,7 +3755,7 @@ out:
5459 ext4_unlock_group(sb, group);
5460 if (ac)
5461 kmem_cache_free(ext4_ac_cachep, ac);
5462 - ext4_mb_release_desc(&e4b);
5463 + ext4_mb_unload_buddy(&e4b);
5464 put_bh(bitmap_bh);
5465 return free;
5466 }
5467 @@ -3819,7 +3859,7 @@ repeat:
5468 if (bitmap_bh == NULL) {
5469 ext4_error(sb, __func__, "Error in reading block "
5470 "bitmap for %u", group);
5471 - ext4_mb_release_desc(&e4b);
5472 + ext4_mb_unload_buddy(&e4b);
5473 continue;
5474 }
5475
5476 @@ -3828,7 +3868,7 @@ repeat:
5477 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
5478 ext4_unlock_group(sb, group);
5479
5480 - ext4_mb_release_desc(&e4b);
5481 + ext4_mb_unload_buddy(&e4b);
5482 put_bh(bitmap_bh);
5483
5484 list_del(&pa->u.pa_tmp_list);
5485 @@ -3944,7 +3984,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5486
5487 /* don't use group allocation for large files */
5488 size = max(size, isize);
5489 - if (size >= sbi->s_mb_stream_request) {
5490 + if (size > sbi->s_mb_stream_request) {
5491 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5492 return;
5493 }
5494 @@ -4092,7 +4132,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
5495 ext4_mb_release_group_pa(&e4b, pa, ac);
5496 ext4_unlock_group(sb, group);
5497
5498 - ext4_mb_release_desc(&e4b);
5499 + ext4_mb_unload_buddy(&e4b);
5500 list_del(&pa->u.pa_tmp_list);
5501 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5502 }
5503 @@ -4594,7 +4634,7 @@ do_more:
5504 atomic_add(count, &sbi->s_flex_groups[flex_group].free_blocks);
5505 }
5506
5507 - ext4_mb_release_desc(&e4b);
5508 + ext4_mb_unload_buddy(&e4b);
5509
5510 *freed += count;
5511
5512 diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
5513 index 8646149..7901f13 100644
5514 --- a/fs/ext4/migrate.c
5515 +++ b/fs/ext4/migrate.c
5516 @@ -357,12 +357,12 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
5517 * happened after we started the migrate. We need to
5518 * fail the migrate
5519 */
5520 - if (!(EXT4_I(inode)->i_state & EXT4_STATE_EXT_MIGRATE)) {
5521 + if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
5522 retval = -EAGAIN;
5523 up_write(&EXT4_I(inode)->i_data_sem);
5524 goto err_out;
5525 } else
5526 - EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
5527 + ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
5528 /*
5529 * We have the extent map build with the tmp inode.
5530 * Now copy the i_data across
5531 @@ -465,7 +465,7 @@ int ext4_ext_migrate(struct inode *inode)
5532 */
5533 if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
5534 EXT4_FEATURE_INCOMPAT_EXTENTS) ||
5535 - (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
5536 + (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5537 return -EINVAL;
5538
5539 if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
5540 @@ -494,14 +494,10 @@ int ext4_ext_migrate(struct inode *inode)
5541 }
5542 i_size_write(tmp_inode, i_size_read(inode));
5543 /*
5544 - * We don't want the inode to be reclaimed
5545 - * if we got interrupted in between. We have
5546 - * this tmp inode carrying reference to the
5547 - * data blocks of the original file. We set
5548 - * the i_nlink to zero at the last stage after
5549 - * switching the original file to extent format
5550 + * Set the i_nlink to zero so it will be deleted later
5551 + * when we drop inode reference.
5552 */
5553 - tmp_inode->i_nlink = 1;
5554 + tmp_inode->i_nlink = 0;
5555
5556 ext4_ext_tree_init(handle, tmp_inode);
5557 ext4_orphan_add(handle, tmp_inode);
5558 @@ -524,10 +520,20 @@ int ext4_ext_migrate(struct inode *inode)
5559 * allocation.
5560 */
5561 down_read((&EXT4_I(inode)->i_data_sem));
5562 - EXT4_I(inode)->i_state |= EXT4_STATE_EXT_MIGRATE;
5563 + ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
5564 up_read((&EXT4_I(inode)->i_data_sem));
5565
5566 handle = ext4_journal_start(inode, 1);
5567 + if (IS_ERR(handle)) {
5568 + /*
5569 + * It is impossible to update on-disk structures without
5570 + * a handle, so just rollback in-core changes and live other
5571 + * work to orphan_list_cleanup()
5572 + */
5573 + ext4_orphan_del(NULL, tmp_inode);
5574 + retval = PTR_ERR(handle);
5575 + goto out;
5576 + }
5577
5578 ei = EXT4_I(inode);
5579 i_data = ei->i_data;
5580 @@ -609,15 +615,8 @@ err_out:
5581
5582 /* Reset the extent details */
5583 ext4_ext_tree_init(handle, tmp_inode);
5584 -
5585 - /*
5586 - * Set the i_nlink to zero so that
5587 - * generic_drop_inode really deletes the
5588 - * inode
5589 - */
5590 - tmp_inode->i_nlink = 0;
5591 -
5592 ext4_journal_stop(handle);
5593 +out:
5594 unlock_new_inode(tmp_inode);
5595 iput(tmp_inode);
5596
5597 diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
5598 index f5b03a1..5b14d11 100644
5599 --- a/fs/ext4/move_extent.c
5600 +++ b/fs/ext4/move_extent.c
5601 @@ -252,6 +252,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
5602 }
5603
5604 o_start->ee_len = start_ext->ee_len;
5605 + eblock = le32_to_cpu(start_ext->ee_block);
5606 new_flag = 1;
5607
5608 } else if (start_ext->ee_len && new_ext->ee_len &&
5609 @@ -262,6 +263,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
5610 * orig |------------------------------|
5611 */
5612 o_start->ee_len = start_ext->ee_len;
5613 + eblock = le32_to_cpu(start_ext->ee_block);
5614 new_flag = 1;
5615
5616 } else if (!start_ext->ee_len && new_ext->ee_len &&
5617 @@ -475,7 +477,6 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
5618 struct ext4_extent *oext, *o_start, *o_end, *prev_ext;
5619 struct ext4_extent new_ext, start_ext, end_ext;
5620 ext4_lblk_t new_ext_end;
5621 - ext4_fsblk_t new_phys_end;
5622 int oext_alen, new_ext_alen, end_ext_alen;
5623 int depth = ext_depth(orig_inode);
5624 int ret;
5625 @@ -489,7 +490,6 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
5626 new_ext.ee_len = dext->ee_len;
5627 new_ext_alen = ext4_ext_get_actual_len(&new_ext);
5628 new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1;
5629 - new_phys_end = ext_pblock(&new_ext) + new_ext_alen - 1;
5630
5631 /*
5632 * Case: original extent is first
5633 @@ -502,6 +502,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
5634 le32_to_cpu(oext->ee_block) + oext_alen) {
5635 start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) -
5636 le32_to_cpu(oext->ee_block));
5637 + start_ext.ee_block = oext->ee_block;
5638 copy_extent_status(oext, &start_ext);
5639 } else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) {
5640 prev_ext = oext - 1;
5641 @@ -515,6 +516,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
5642 start_ext.ee_len = cpu_to_le16(
5643 ext4_ext_get_actual_len(prev_ext) +
5644 new_ext_alen);
5645 + start_ext.ee_block = oext->ee_block;
5646 copy_extent_status(prev_ext, &start_ext);
5647 new_ext.ee_len = 0;
5648 }
5649 @@ -928,7 +930,7 @@ out2:
5650 }
5651
5652 /**
5653 - * mext_check_argumants - Check whether move extent can be done
5654 + * mext_check_arguments - Check whether move extent can be done
5655 *
5656 * @orig_inode: original inode
5657 * @donor_inode: donor inode
5658 @@ -949,14 +951,6 @@ mext_check_arguments(struct inode *orig_inode,
5659 unsigned int blkbits = orig_inode->i_blkbits;
5660 unsigned int blocksize = 1 << blkbits;
5661
5662 - /* Regular file check */
5663 - if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
5664 - ext4_debug("ext4 move extent: The argument files should be "
5665 - "regular file [ino:orig %lu, donor %lu]\n",
5666 - orig_inode->i_ino, donor_inode->i_ino);
5667 - return -EINVAL;
5668 - }
5669 -
5670 if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
5671 ext4_debug("ext4 move extent: suid or sgid is set"
5672 " to donor file [ino:orig %lu, donor %lu]\n",
5673 @@ -981,11 +975,11 @@ mext_check_arguments(struct inode *orig_inode,
5674 }
5675
5676 /* Ext4 move extent supports only extent based file */
5677 - if (!(EXT4_I(orig_inode)->i_flags & EXT4_EXTENTS_FL)) {
5678 + if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
5679 ext4_debug("ext4 move extent: orig file is not extents "
5680 "based file [ino:orig %lu]\n", orig_inode->i_ino);
5681 return -EOPNOTSUPP;
5682 - } else if (!(EXT4_I(donor_inode)->i_flags & EXT4_EXTENTS_FL)) {
5683 + } else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
5684 ext4_debug("ext4 move extent: donor file is not extents "
5685 "based file [ino:donor %lu]\n", donor_inode->i_ino);
5686 return -EOPNOTSUPP;
5687 @@ -1204,6 +1198,14 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
5688 return -EINVAL;
5689 }
5690
5691 + /* Regular file check */
5692 + if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
5693 + ext4_debug("ext4 move extent: The argument files should be "
5694 + "regular file [ino:orig %lu, donor %lu]\n",
5695 + orig_inode->i_ino, donor_inode->i_ino);
5696 + return -EINVAL;
5697 + }
5698 +
5699 /* Protect orig and donor inodes against a truncate */
5700 ret1 = mext_inode_double_lock(orig_inode, donor_inode);
5701 if (ret1 < 0)
5702 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
5703 index 17a17e1..c3b6ad0 100644
5704 --- a/fs/ext4/namei.c
5705 +++ b/fs/ext4/namei.c
5706 @@ -660,7 +660,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
5707 dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n",
5708 start_hash, start_minor_hash));
5709 dir = dir_file->f_path.dentry->d_inode;
5710 - if (!(EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) {
5711 + if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) {
5712 hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
5713 if (hinfo.hash_version <= DX_HASH_TEA)
5714 hinfo.hash_version +=
5715 @@ -805,7 +805,7 @@ static void ext4_update_dx_flag(struct inode *inode)
5716 {
5717 if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
5718 EXT4_FEATURE_COMPAT_DIR_INDEX))
5719 - EXT4_I(inode)->i_flags &= ~EXT4_INDEX_FL;
5720 + ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
5721 }
5722
5723 /*
5724 @@ -1424,7 +1424,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
5725 brelse(bh);
5726 return retval;
5727 }
5728 - EXT4_I(dir)->i_flags |= EXT4_INDEX_FL;
5729 + ext4_set_inode_flag(dir, EXT4_INODE_INDEX);
5730 data1 = bh2->b_data;
5731
5732 memcpy (data1, de, len);
5733 @@ -1497,7 +1497,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
5734 retval = ext4_dx_add_entry(handle, dentry, inode);
5735 if (!retval || (retval != ERR_BAD_DX_DIR))
5736 return retval;
5737 - EXT4_I(dir)->i_flags &= ~EXT4_INDEX_FL;
5738 + ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
5739 dx_fallback++;
5740 ext4_mark_inode_dirty(handle, dir);
5741 }
5742 @@ -1525,6 +1525,8 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
5743 de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize);
5744 retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
5745 brelse(bh);
5746 + if (retval == 0)
5747 + ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
5748 return retval;
5749 }
5750
5751 @@ -2020,11 +2022,18 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
5752 err = ext4_reserve_inode_write(handle, inode, &iloc);
5753 if (err)
5754 goto out_unlock;
5755 + /*
5756 + * Due to previous errors inode may be already a part of on-disk
5757 + * orphan list. If so skip on-disk list modification.
5758 + */
5759 + if (NEXT_ORPHAN(inode) && NEXT_ORPHAN(inode) <=
5760 + (le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)))
5761 + goto mem_insert;
5762
5763 /* Insert this inode at the head of the on-disk orphan list... */
5764 NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
5765 EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
5766 - err = ext4_handle_dirty_metadata(handle, inode, EXT4_SB(sb)->s_sbh);
5767 + err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
5768 rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
5769 if (!err)
5770 err = rc;
5771 @@ -2037,6 +2046,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
5772 *
5773 * This is safe: on error we're going to ignore the orphan list
5774 * anyway on the next recovery. */
5775 +mem_insert:
5776 if (!err)
5777 list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
5778
5779 @@ -2096,7 +2106,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
5780 if (err)
5781 goto out_brelse;
5782 sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
5783 - err = ext4_handle_dirty_metadata(handle, inode, sbi->s_sbh);
5784 + err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
5785 } else {
5786 struct ext4_iloc iloc2;
5787 struct inode *i_prev =
5788 @@ -2284,7 +2294,7 @@ retry:
5789 }
5790 } else {
5791 /* clear the extent format for fast symlink */
5792 - EXT4_I(inode)->i_flags &= ~EXT4_EXTENTS_FL;
5793 + ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
5794 inode->i_op = &ext4_fast_symlink_inode_operations;
5795 memcpy((char *)&EXT4_I(inode)->i_data, symname, l);
5796 inode->i_size = l-1;
5797 diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
5798 index 3b2c554..433ea27 100644
5799 --- a/fs/ext4/resize.c
5800 +++ b/fs/ext4/resize.c
5801 @@ -930,7 +930,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
5802 percpu_counter_add(&sbi->s_freeinodes_counter,
5803 EXT4_INODES_PER_GROUP(sb));
5804
5805 - if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
5806 + if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
5807 + sbi->s_log_groups_per_flex) {
5808 ext4_group_t flex_group;
5809 flex_group = ext4_flex_group(sbi, input->group);
5810 atomic_add(input->free_blocks_count,
5811 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
5812 index 92943f2..54a05cc 100644
5813 --- a/fs/ext4/super.c
5814 +++ b/fs/ext4/super.c
5815 @@ -227,6 +227,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
5816 if (sb->s_flags & MS_RDONLY)
5817 return ERR_PTR(-EROFS);
5818
5819 + vfs_check_frozen(sb, SB_FREEZE_WRITE);
5820 /* Special case here: if the journal has aborted behind our
5821 * backs (eg. EIO in the commit thread), then we still need to
5822 * take the FS itself readonly cleanly. */
5823 @@ -702,6 +703,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
5824 ei->i_reserved_data_blocks = 0;
5825 ei->i_reserved_meta_blocks = 0;
5826 ei->i_allocated_meta_blocks = 0;
5827 + ei->i_da_metadata_calc_len = 0;
5828 ei->i_delalloc_reserved_flag = 0;
5829 spin_lock_init(&(ei->i_block_reservation_lock));
5830 #ifdef CONFIG_QUOTA
5831 @@ -875,6 +877,8 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
5832 seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
5833 if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
5834 seq_puts(seq, ",journal_async_commit");
5835 + else if (test_opt(sb, JOURNAL_CHECKSUM))
5836 + seq_puts(seq, ",journal_checksum");
5837 if (test_opt(sb, NOBH))
5838 seq_puts(seq, ",nobh");
5839 if (test_opt(sb, I_VERSION))
5840 @@ -2693,24 +2697,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
5841 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
5842 spin_lock_init(&sbi->s_next_gen_lock);
5843
5844 - err = percpu_counter_init(&sbi->s_freeblocks_counter,
5845 - ext4_count_free_blocks(sb));
5846 - if (!err) {
5847 - err = percpu_counter_init(&sbi->s_freeinodes_counter,
5848 - ext4_count_free_inodes(sb));
5849 - }
5850 - if (!err) {
5851 - err = percpu_counter_init(&sbi->s_dirs_counter,
5852 - ext4_count_dirs(sb));
5853 - }
5854 - if (!err) {
5855 - err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
5856 - }
5857 - if (err) {
5858 - ext4_msg(sb, KERN_ERR, "insufficient memory");
5859 - goto failed_mount3;
5860 - }
5861 -
5862 sbi->s_stripe = ext4_get_stripe_size(sbi);
5863 sbi->s_max_writeback_mb_bump = 128;
5864
5865 @@ -2830,7 +2816,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
5866 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
5867
5868 no_journal:
5869 -
5870 + err = percpu_counter_init(&sbi->s_freeblocks_counter,
5871 + ext4_count_free_blocks(sb));
5872 + if (!err)
5873 + err = percpu_counter_init(&sbi->s_freeinodes_counter,
5874 + ext4_count_free_inodes(sb));
5875 + if (!err)
5876 + err = percpu_counter_init(&sbi->s_dirs_counter,
5877 + ext4_count_dirs(sb));
5878 + if (!err)
5879 + err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
5880 + if (err) {
5881 + ext4_msg(sb, KERN_ERR, "insufficient memory");
5882 + goto failed_mount_wq;
5883 + }
5884 if (test_opt(sb, NOBH)) {
5885 if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
5886 ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - "
5887 @@ -2905,7 +2904,7 @@ no_journal:
5888 err = ext4_setup_system_zone(sb);
5889 if (err) {
5890 ext4_msg(sb, KERN_ERR, "failed to initialize system "
5891 - "zone (%d)\n", err);
5892 + "zone (%d)", err);
5893 goto failed_mount4;
5894 }
5895
5896 @@ -2963,6 +2962,10 @@ failed_mount_wq:
5897 jbd2_journal_destroy(sbi->s_journal);
5898 sbi->s_journal = NULL;
5899 }
5900 + percpu_counter_destroy(&sbi->s_freeblocks_counter);
5901 + percpu_counter_destroy(&sbi->s_freeinodes_counter);
5902 + percpu_counter_destroy(&sbi->s_dirs_counter);
5903 + percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
5904 failed_mount3:
5905 if (sbi->s_flex_groups) {
5906 if (is_vmalloc_addr(sbi->s_flex_groups))
5907 @@ -2970,10 +2973,6 @@ failed_mount3:
5908 else
5909 kfree(sbi->s_flex_groups);
5910 }
5911 - percpu_counter_destroy(&sbi->s_freeblocks_counter);
5912 - percpu_counter_destroy(&sbi->s_freeinodes_counter);
5913 - percpu_counter_destroy(&sbi->s_dirs_counter);
5914 - percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
5915 failed_mount2:
5916 for (i = 0; i < db_count; i++)
5917 brelse(sbi->s_group_desc[i]);
5918 @@ -3390,8 +3389,10 @@ int ext4_force_commit(struct super_block *sb)
5919 return 0;
5920
5921 journal = EXT4_SB(sb)->s_journal;
5922 - if (journal)
5923 + if (journal) {
5924 + vfs_check_frozen(sb, SB_FREEZE_WRITE);
5925 ret = ext4_journal_force_commit(journal);
5926 + }
5927
5928 return ret;
5929 }
5930 @@ -3440,18 +3441,16 @@ static int ext4_freeze(struct super_block *sb)
5931 * the journal.
5932 */
5933 error = jbd2_journal_flush(journal);
5934 - if (error < 0) {
5935 - out:
5936 - jbd2_journal_unlock_updates(journal);
5937 - return error;
5938 - }
5939 + if (error < 0)
5940 + goto out;
5941
5942 /* Journal blocked and flushed, clear needs_recovery flag. */
5943 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
5944 error = ext4_commit_super(sb, 1);
5945 - if (error)
5946 - goto out;
5947 - return 0;
5948 +out:
5949 + /* we rely on s_frozen to stop further updates */
5950 + jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
5951 + return error;
5952 }
5953
5954 /*
5955 @@ -3468,7 +3467,6 @@ static int ext4_unfreeze(struct super_block *sb)
5956 EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
5957 ext4_commit_super(sb, 1);
5958 unlock_super(sb);
5959 - jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
5960 return 0;
5961 }
5962
5963 @@ -4001,6 +3999,7 @@ static int __init init_ext4_fs(void)
5964 {
5965 int err;
5966
5967 + ext4_check_flag_values();
5968 err = init_ext4_system_zone();
5969 if (err)
5970 return err;
5971 diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
5972 index 0257019..4de7d0a 100644
5973 --- a/fs/ext4/xattr.c
5974 +++ b/fs/ext4/xattr.c
5975 @@ -267,7 +267,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
5976 void *end;
5977 int error;
5978
5979 - if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR))
5980 + if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
5981 return -ENODATA;
5982 error = ext4_get_inode_loc(inode, &iloc);
5983 if (error)
5984 @@ -393,7 +393,7 @@ ext4_xattr_ibody_list(struct inode *inode, char *buffer, size_t buffer_size)
5985 void *end;
5986 int error;
5987
5988 - if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR))
5989 + if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
5990 return 0;
5991 error = ext4_get_inode_loc(inode, &iloc);
5992 if (error)
5993 @@ -816,7 +816,7 @@ inserted:
5994 EXT4_I(inode)->i_block_group);
5995
5996 /* non-extent files can't have physical blocks past 2^32 */
5997 - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
5998 + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5999 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
6000
6001 block = ext4_new_meta_blocks(handle, inode,
6002 @@ -824,7 +824,7 @@ inserted:
6003 if (error)
6004 goto cleanup;
6005
6006 - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
6007 + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
6008 BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);
6009
6010 ea_idebug(inode, "creating block %d", block);
6011 @@ -903,7 +903,7 @@ ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
6012 is->s.base = is->s.first = IFIRST(header);
6013 is->s.here = is->s.first;
6014 is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
6015 - if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) {
6016 + if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
6017 error = ext4_xattr_check_names(IFIRST(header), is->s.end);
6018 if (error)
6019 return error;
6020 @@ -935,10 +935,10 @@ ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
6021 header = IHDR(inode, ext4_raw_inode(&is->iloc));
6022 if (!IS_LAST_ENTRY(s->first)) {
6023 header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
6024 - EXT4_I(inode)->i_state |= EXT4_STATE_XATTR;
6025 + ext4_set_inode_state(inode, EXT4_STATE_XATTR);
6026 } else {
6027 header->h_magic = cpu_to_le32(0);
6028 - EXT4_I(inode)->i_state &= ~EXT4_STATE_XATTR;
6029 + ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
6030 }
6031 return 0;
6032 }
6033 @@ -981,8 +981,8 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
6034 if (strlen(name) > 255)
6035 return -ERANGE;
6036 down_write(&EXT4_I(inode)->xattr_sem);
6037 - no_expand = EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND;
6038 - EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
6039 + no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
6040 + ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
6041
6042 error = ext4_get_inode_loc(inode, &is.iloc);
6043 if (error)
6044 @@ -992,10 +992,10 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
6045 if (error)
6046 goto cleanup;
6047
6048 - if (EXT4_I(inode)->i_state & EXT4_STATE_NEW) {
6049 + if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
6050 struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
6051 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
6052 - EXT4_I(inode)->i_state &= ~EXT4_STATE_NEW;
6053 + ext4_clear_inode_state(inode, EXT4_STATE_NEW);
6054 }
6055
6056 error = ext4_xattr_ibody_find(inode, &i, &is);
6057 @@ -1047,7 +1047,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
6058 ext4_xattr_update_super_block(handle, inode->i_sb);
6059 inode->i_ctime = ext4_current_time(inode);
6060 if (!value)
6061 - EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND;
6062 + ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
6063 error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
6064 /*
6065 * The bh is consumed by ext4_mark_iloc_dirty, even with
6066 @@ -1062,7 +1062,7 @@ cleanup:
6067 brelse(is.iloc.bh);
6068 brelse(bs.bh);
6069 if (no_expand == 0)
6070 - EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND;
6071 + ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
6072 up_write(&EXT4_I(inode)->xattr_sem);
6073 return error;
6074 }
6075 @@ -1327,6 +1327,8 @@ retry:
6076 goto cleanup;
6077 kfree(b_entry_name);
6078 kfree(buffer);
6079 + b_entry_name = NULL;
6080 + buffer = NULL;
6081 brelse(is->iloc.bh);
6082 kfree(is);
6083 kfree(bs);
6084 diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
6085 index 3fc4e3a..2168da1 100644
6086 --- a/fs/gfs2/acl.c
6087 +++ b/fs/gfs2/acl.c
6088 @@ -12,6 +12,7 @@
6089 #include <linux/spinlock.h>
6090 #include <linux/completion.h>
6091 #include <linux/buffer_head.h>
6092 +#include <linux/xattr.h>
6093 #include <linux/posix_acl.h>
6094 #include <linux/posix_acl_xattr.h>
6095 #include <linux/gfs2_ondisk.h>
6096 @@ -26,61 +27,6 @@
6097 #include "trans.h"
6098 #include "util.h"
6099
6100 -#define ACL_ACCESS 1
6101 -#define ACL_DEFAULT 0
6102 -
6103 -int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
6104 - struct gfs2_ea_request *er, int *remove, mode_t *mode)
6105 -{
6106 - struct posix_acl *acl;
6107 - int error;
6108 -
6109 - error = gfs2_acl_validate_remove(ip, access);
6110 - if (error)
6111 - return error;
6112 -
6113 - if (!er->er_data)
6114 - return -EINVAL;
6115 -
6116 - acl = posix_acl_from_xattr(er->er_data, er->er_data_len);
6117 - if (IS_ERR(acl))
6118 - return PTR_ERR(acl);
6119 - if (!acl) {
6120 - *remove = 1;
6121 - return 0;
6122 - }
6123 -
6124 - error = posix_acl_valid(acl);
6125 - if (error)
6126 - goto out;
6127 -
6128 - if (access) {
6129 - error = posix_acl_equiv_mode(acl, mode);
6130 - if (!error)
6131 - *remove = 1;
6132 - else if (error > 0)
6133 - error = 0;
6134 - }
6135 -
6136 -out:
6137 - posix_acl_release(acl);
6138 - return error;
6139 -}
6140 -
6141 -int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
6142 -{
6143 - if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
6144 - return -EOPNOTSUPP;
6145 - if (!is_owner_or_cap(&ip->i_inode))
6146 - return -EPERM;
6147 - if (S_ISLNK(ip->i_inode.i_mode))
6148 - return -EOPNOTSUPP;
6149 - if (!access && !S_ISDIR(ip->i_inode.i_mode))
6150 - return -EACCES;
6151 -
6152 - return 0;
6153 -}
6154 -
6155 static int acl_get(struct gfs2_inode *ip, const char *name,
6156 struct posix_acl **acl, struct gfs2_ea_location *el,
6157 char **datap, unsigned int *lenp)
6158 @@ -277,3 +223,117 @@ out_brelse:
6159 return error;
6160 }
6161
6162 +static int gfs2_acl_type(const char *name)
6163 +{
6164 + if (strcmp(name, GFS2_POSIX_ACL_ACCESS) == 0)
6165 + return ACL_TYPE_ACCESS;
6166 + if (strcmp(name, GFS2_POSIX_ACL_DEFAULT) == 0)
6167 + return ACL_TYPE_DEFAULT;
6168 + return -EINVAL;
6169 +}
6170 +
6171 +static int gfs2_xattr_system_get(struct inode *inode, const char *name,
6172 + void *buffer, size_t size)
6173 +{
6174 + int type;
6175 +
6176 + type = gfs2_acl_type(name);
6177 + if (type < 0)
6178 + return type;
6179 +
6180 + return gfs2_xattr_get(inode, GFS2_EATYPE_SYS, name, buffer, size);
6181 +}
6182 +
6183 +static int gfs2_set_mode(struct inode *inode, mode_t mode)
6184 +{
6185 + int error = 0;
6186 +
6187 + if (mode != inode->i_mode) {
6188 + struct iattr iattr;
6189 +
6190 + iattr.ia_valid = ATTR_MODE;
6191 + iattr.ia_mode = mode;
6192 +
6193 + error = gfs2_setattr_simple(GFS2_I(inode), &iattr);
6194 + }
6195 +
6196 + return error;
6197 +}
6198 +
6199 +static int gfs2_xattr_system_set(struct inode *inode, const char *name,
6200 + const void *value, size_t size, int flags)
6201 +{
6202 + struct gfs2_sbd *sdp = GFS2_SB(inode);
6203 + struct posix_acl *acl = NULL;
6204 + int error = 0, type;
6205 +
6206 + if (!sdp->sd_args.ar_posix_acl)
6207 + return -EOPNOTSUPP;
6208 +
6209 + type = gfs2_acl_type(name);
6210 + if (type < 0)
6211 + return type;
6212 + if (flags & XATTR_CREATE)
6213 + return -EINVAL;
6214 + if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
6215 + return value ? -EACCES : 0;
6216 + if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER))
6217 + return -EPERM;
6218 + if (S_ISLNK(inode->i_mode))
6219 + return -EOPNOTSUPP;
6220 +
6221 + if (!value)
6222 + goto set_acl;
6223 +
6224 + acl = posix_acl_from_xattr(value, size);
6225 + if (!acl) {
6226 + /*
6227 + * acl_set_file(3) may request that we set default ACLs with
6228 + * zero length -- defend (gracefully) against that here.
6229 + */
6230 + goto out;
6231 + }
6232 + if (IS_ERR(acl)) {
6233 + error = PTR_ERR(acl);
6234 + goto out;
6235 + }
6236 +
6237 + error = posix_acl_valid(acl);
6238 + if (error)
6239 + goto out_release;
6240 +
6241 + error = -EINVAL;
6242 + if (acl->a_count > GFS2_ACL_MAX_ENTRIES)
6243 + goto out_release;
6244 +
6245 + if (type == ACL_TYPE_ACCESS) {
6246 + mode_t mode = inode->i_mode;
6247 + error = posix_acl_equiv_mode(acl, &mode);
6248 +
6249 + if (error <= 0) {
6250 + posix_acl_release(acl);
6251 + acl = NULL;
6252 +
6253 + if (error < 0)
6254 + return error;
6255 + }
6256 +
6257 + error = gfs2_set_mode(inode, mode);
6258 + if (error)
6259 + goto out_release;
6260 + }
6261 +
6262 +set_acl:
6263 + error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, 0);
6264 +out_release:
6265 + posix_acl_release(acl);
6266 +out:
6267 + return error;
6268 +}
6269 +
6270 +struct xattr_handler gfs2_xattr_system_handler = {
6271 + .prefix = XATTR_SYSTEM_PREFIX,
6272 + .get = gfs2_xattr_system_get,
6273 + .set = gfs2_xattr_system_set,
6274 +};
6275 +
6276 diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
6277 index 6751930..cc95439 100644
6278 --- a/fs/gfs2/acl.h
6279 +++ b/fs/gfs2/acl.h
6280 @@ -13,26 +13,12 @@
6281 #include "incore.h"
6282
6283 #define GFS2_POSIX_ACL_ACCESS "posix_acl_access"
6284 -#define GFS2_POSIX_ACL_ACCESS_LEN 16
6285 #define GFS2_POSIX_ACL_DEFAULT "posix_acl_default"
6286 -#define GFS2_POSIX_ACL_DEFAULT_LEN 17
6287 +#define GFS2_ACL_MAX_ENTRIES 25
6288
6289 -#define GFS2_ACL_IS_ACCESS(name, len) \
6290 - ((len) == GFS2_POSIX_ACL_ACCESS_LEN && \
6291 - !memcmp(GFS2_POSIX_ACL_ACCESS, (name), (len)))
6292 -
6293 -#define GFS2_ACL_IS_DEFAULT(name, len) \
6294 - ((len) == GFS2_POSIX_ACL_DEFAULT_LEN && \
6295 - !memcmp(GFS2_POSIX_ACL_DEFAULT, (name), (len)))
6296 -
6297 -struct gfs2_ea_request;
6298 -
6299 -int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
6300 - struct gfs2_ea_request *er,
6301 - int *remove, mode_t *mode);
6302 -int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
6303 -int gfs2_check_acl(struct inode *inode, int mask);
6304 -int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
6305 -int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
6306 +extern int gfs2_check_acl(struct inode *inode, int mask);
6307 +extern int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
6308 +extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
6309 +extern struct xattr_handler gfs2_xattr_system_handler;
6310
6311 #endif /* __ACL_DOT_H__ */
6312 diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
6313 index 8a0f8ef..6b80354 100644
6314 --- a/fs/gfs2/xattr.c
6315 +++ b/fs/gfs2/xattr.c
6316 @@ -1507,18 +1507,6 @@ static int gfs2_xattr_user_set(struct inode *inode, const char *name,
6317 return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags);
6318 }
6319
6320 -static int gfs2_xattr_system_get(struct inode *inode, const char *name,
6321 - void *buffer, size_t size)
6322 -{
6323 - return gfs2_xattr_get(inode, GFS2_EATYPE_SYS, name, buffer, size);
6324 -}
6325 -
6326 -static int gfs2_xattr_system_set(struct inode *inode, const char *name,
6327 - const void *value, size_t size, int flags)
6328 -{
6329 - return gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, flags);
6330 -}
6331 -
6332 static int gfs2_xattr_security_get(struct inode *inode, const char *name,
6333 void *buffer, size_t size)
6334 {
6335 @@ -1543,12 +1531,6 @@ static struct xattr_handler gfs2_xattr_security_handler = {
6336 .set = gfs2_xattr_security_set,
6337 };
6338
6339 -static struct xattr_handler gfs2_xattr_system_handler = {
6340 - .prefix = XATTR_SYSTEM_PREFIX,
6341 - .get = gfs2_xattr_system_get,
6342 - .set = gfs2_xattr_system_set,
6343 -};
6344 -
6345 struct xattr_handler *gfs2_xattr_handlers[] = {
6346 &gfs2_xattr_user_handler,
6347 &gfs2_xattr_security_handler,
6348 diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
6349 index ca0f5eb..8868493 100644