Contents of /genpatches-2.6/tags/3.2-6/1001_linux-3.2.2.patch


Revision 2076
Thu Feb 2 11:46:07 2012 UTC by mpagano
File size: 218217 bytes
3.2-6 release
1 diff --git a/Makefile b/Makefile
2 index c5edffa..2f684da 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 2
8 -SUBLEVEL = 1
9 +SUBLEVEL = 2
10 EXTRAVERSION =
11 NAME = Saber-toothed Squirrel
12
13 diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
14 index bfb4d01..5207035 100644
15 --- a/arch/ia64/kernel/acpi.c
16 +++ b/arch/ia64/kernel/acpi.c
17 @@ -429,22 +429,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
18 static struct acpi_table_slit __initdata *slit_table;
19 cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
20
21 -static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
22 +static int __init
23 +get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
24 {
25 int pxm;
26
27 pxm = pa->proximity_domain_lo;
28 - if (ia64_platform_is("sn2"))
29 + if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
30 pxm += pa->proximity_domain_hi[0] << 8;
31 return pxm;
32 }
33
34 -static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
35 +static int __init
36 +get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
37 {
38 int pxm;
39
40 pxm = ma->proximity_domain;
41 - if (!ia64_platform_is("sn2"))
42 + if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
43 pxm &= 0xff;
44
45 return pxm;
46 diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
47 index 577abba..83bb960 100644
48 --- a/arch/score/kernel/entry.S
49 +++ b/arch/score/kernel/entry.S
50 @@ -408,7 +408,7 @@ ENTRY(handle_sys)
51 sw r9, [r0, PT_EPC]
52
53 cmpi.c r27, __NR_syscalls # check syscall number
54 - bgtu illegal_syscall
55 + bgeu illegal_syscall
56
57 slli r8, r27, 2 # get syscall routine
58 la r11, sys_call_table
59 diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
60 index 8e41071..49ad773 100644
61 --- a/arch/x86/include/asm/amd_nb.h
62 +++ b/arch/x86/include/asm/amd_nb.h
63 @@ -1,6 +1,7 @@
64 #ifndef _ASM_X86_AMD_NB_H
65 #define _ASM_X86_AMD_NB_H
66
67 +#include <linux/ioport.h>
68 #include <linux/pci.h>
69
70 struct amd_nb_bus_dev_range {
71 @@ -13,6 +14,7 @@ extern const struct pci_device_id amd_nb_misc_ids[];
72 extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
73
74 extern bool early_is_amd_nb(u32 value);
75 +extern struct resource *amd_get_mmconfig_range(struct resource *res);
76 extern int amd_cache_northbridges(void);
77 extern void amd_flush_garts(void);
78 extern int amd_numa_init(void);
79 diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
80 index 8e862aa..1b82f7e 100644
81 --- a/arch/x86/include/asm/uv/uv_bau.h
82 +++ b/arch/x86/include/asm/uv/uv_bau.h
83 @@ -65,7 +65,7 @@
84 * UV2: Bit 19 selects between
85 * (0): 10 microsecond timebase and
86 * (1): 80 microseconds
87 - * we're using 655us, similar to UV1: 65 units of 10us
88 + * we're using 560us, similar to UV1: 65 units of 10us
89 */
90 #define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
91 #define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
92 @@ -167,6 +167,7 @@
93 #define FLUSH_RETRY_TIMEOUT 2
94 #define FLUSH_GIVEUP 3
95 #define FLUSH_COMPLETE 4
96 +#define FLUSH_RETRY_BUSYBUG 5
97
98 /*
99 * tuning the action when the numalink network is extremely delayed
100 @@ -235,10 +236,10 @@ struct bau_msg_payload {
101
102
103 /*
104 - * Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
105 + * UV1 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
106 * see table 4.2.3.0.1 in broacast_assist spec.
107 */
108 -struct bau_msg_header {
109 +struct uv1_bau_msg_header {
110 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
111 /* bits 5:0 */
112 unsigned int base_dest_nasid:15; /* nasid of the first bit */
113 @@ -318,19 +319,87 @@ struct bau_msg_header {
114 };
115
116 /*
117 + * UV2 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
118 + * see figure 9-2 of harp_sys.pdf
119 + */
120 +struct uv2_bau_msg_header {
121 + unsigned int base_dest_nasid:15; /* nasid of the first bit */
122 + /* bits 14:0 */ /* in uvhub map */
123 + unsigned int dest_subnodeid:5; /* must be 0x10, for the LB */
124 + /* bits 19:15 */
125 + unsigned int rsvd_1:1; /* must be zero */
126 + /* bit 20 */
127 + /* Address bits 59:21 */
128 + /* bits 25:2 of address (44:21) are payload */
129 + /* these next 24 bits become bytes 12-14 of msg */
130 + /* bits 28:21 land in byte 12 */
131 + unsigned int replied_to:1; /* sent as 0 by the source to
132 + byte 12 */
133 + /* bit 21 */
134 + unsigned int msg_type:3; /* software type of the
135 + message */
136 + /* bits 24:22 */
137 + unsigned int canceled:1; /* message canceled, resource
138 + is to be freed*/
139 + /* bit 25 */
140 + unsigned int payload_1:3; /* not currently used */
141 + /* bits 28:26 */
142 +
143 + /* bits 36:29 land in byte 13 */
144 + unsigned int payload_2a:3; /* not currently used */
145 + unsigned int payload_2b:5; /* not currently used */
146 + /* bits 36:29 */
147 +
148 + /* bits 44:37 land in byte 14 */
149 + unsigned int payload_3:8; /* not currently used */
150 + /* bits 44:37 */
151 +
152 + unsigned int rsvd_2:7; /* reserved */
153 + /* bits 51:45 */
154 + unsigned int swack_flag:1; /* software acknowledge flag */
155 + /* bit 52 */
156 + unsigned int rsvd_3a:3; /* must be zero */
157 + unsigned int rsvd_3b:8; /* must be zero */
158 + unsigned int rsvd_3c:8; /* must be zero */
159 + unsigned int rsvd_3d:3; /* must be zero */
160 + /* bits 74:53 */
161 + unsigned int fairness:3; /* usually zero */
162 + /* bits 77:75 */
163 +
164 + unsigned int sequence:16; /* message sequence number */
165 + /* bits 93:78 Suppl_A */
166 + unsigned int chaining:1; /* next descriptor is part of
167 + this activation*/
168 + /* bit 94 */
169 + unsigned int multilevel:1; /* multi-level multicast
170 + format */
171 + /* bit 95 */
172 + unsigned int rsvd_4:24; /* ordered / source node /
173 + source subnode / aging
174 + must be zero */
175 + /* bits 119:96 */
176 + unsigned int command:8; /* message type */
177 + /* bits 127:120 */
178 +};
179 +
180 +/*
181 * The activation descriptor:
182 * The format of the message to send, plus all accompanying control
183 * Should be 64 bytes
184 */
185 struct bau_desc {
186 - struct pnmask distribution;
187 + struct pnmask distribution;
188 /*
189 * message template, consisting of header and payload:
190 */
191 - struct bau_msg_header header;
192 - struct bau_msg_payload payload;
193 + union bau_msg_header {
194 + struct uv1_bau_msg_header uv1_hdr;
195 + struct uv2_bau_msg_header uv2_hdr;
196 + } header;
197 +
198 + struct bau_msg_payload payload;
199 };
200 -/*
201 +/* UV1:
202 * -payload-- ---------header------
203 * bytes 0-11 bits 41-56 bits 58-81
204 * A B (2) C (3)
205 @@ -340,6 +409,16 @@ struct bau_desc {
206 * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
207 * ------------payload queue-----------
208 */
209 +/* UV2:
210 + * -payload-- ---------header------
211 + * bytes 0-11 bits 70-78 bits 21-44
212 + * A B (2) C (3)
213 + *
214 + * A/B/C are moved to:
215 + * A C B
216 + * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
217 + * ------------payload queue-----------
218 + */
219
220 /*
221 * The payload queue on the destination side is an array of these.
222 @@ -385,7 +464,6 @@ struct bau_pq_entry {
223 struct msg_desc {
224 struct bau_pq_entry *msg;
225 int msg_slot;
226 - int swack_slot;
227 struct bau_pq_entry *queue_first;
228 struct bau_pq_entry *queue_last;
229 };
230 @@ -439,6 +517,9 @@ struct ptc_stats {
231 unsigned long s_retry_messages; /* retry broadcasts */
232 unsigned long s_bau_reenabled; /* for bau enable/disable */
233 unsigned long s_bau_disabled; /* for bau enable/disable */
234 + unsigned long s_uv2_wars; /* uv2 workaround, perm. busy */
235 + unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */
236 + unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */
237 /* destination statistics */
238 unsigned long d_alltlb; /* times all tlb's on this
239 cpu were flushed */
240 @@ -511,9 +592,12 @@ struct bau_control {
241 short osnode;
242 short uvhub_cpu;
243 short uvhub;
244 + short uvhub_version;
245 short cpus_in_socket;
246 short cpus_in_uvhub;
247 short partition_base_pnode;
248 + short using_desc; /* an index, like uvhub_cpu */
249 + unsigned int inuse_map;
250 unsigned short message_number;
251 unsigned short uvhub_quiesce;
252 short socket_acknowledge_count[DEST_Q_SIZE];
253 @@ -531,6 +615,7 @@ struct bau_control {
254 int cong_response_us;
255 int cong_reps;
256 int cong_period;
257 + unsigned long clocks_per_100_usec;
258 cycles_t period_time;
259 long period_requests;
260 struct hub_and_pnode *thp;
261 @@ -591,6 +676,11 @@ static inline void write_mmr_sw_ack(unsigned long mr)
262 uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
263 }
264
265 +static inline void write_gmmr_sw_ack(int pnode, unsigned long mr)
266 +{
267 + write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
268 +}
269 +
270 static inline unsigned long read_mmr_sw_ack(void)
271 {
272 return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
273 diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
274 index 4c39baa..bae1efe 100644
275 --- a/arch/x86/kernel/amd_nb.c
276 +++ b/arch/x86/kernel/amd_nb.c
277 @@ -119,6 +119,37 @@ bool __init early_is_amd_nb(u32 device)
278 return false;
279 }
280
281 +struct resource *amd_get_mmconfig_range(struct resource *res)
282 +{
283 + u32 address;
284 + u64 base, msr;
285 + unsigned segn_busn_bits;
286 +
287 + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
288 + return NULL;
289 +
290 + /* assume all cpus from fam10h have mmconfig */
291 + if (boot_cpu_data.x86 < 0x10)
292 + return NULL;
293 +
294 + address = MSR_FAM10H_MMIO_CONF_BASE;
295 + rdmsrl(address, msr);
296 +
297 + /* mmconfig is not enabled */
298 + if (!(msr & FAM10H_MMIO_CONF_ENABLE))
299 + return NULL;
300 +
301 + base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
302 +
303 + segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
304 + FAM10H_MMIO_CONF_BUSRANGE_MASK;
305 +
306 + res->flags = IORESOURCE_MEM;
307 + res->start = base;
308 + res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
309 + return res;
310 +}
311 +
312 int amd_get_subcaches(int cpu)
313 {
314 struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
315 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
316 index 9d59bba..79b05b8 100644
317 --- a/arch/x86/kernel/apic/x2apic_uv_x.c
318 +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
319 @@ -769,7 +769,12 @@ void __init uv_system_init(void)
320 for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
321 uv_possible_blades +=
322 hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
323 - printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
324 +
325 + /* uv_num_possible_blades() is really the hub count */
326 + printk(KERN_INFO "UV: Found %d blades, %d hubs\n",
327 + is_uv1_hub() ? uv_num_possible_blades() :
328 + (uv_num_possible_blades() + 1) / 2,
329 + uv_num_possible_blades());
330
331 bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
332 uv_blade_info = kzalloc(bytes, GFP_KERNEL);
333 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
334 index 4b5ba85..845df68 100644
335 --- a/arch/x86/mm/mmap.c
336 +++ b/arch/x86/mm/mmap.c
337 @@ -75,9 +75,9 @@ static unsigned long mmap_rnd(void)
338 */
339 if (current->flags & PF_RANDOMIZE) {
340 if (mmap_is_ia32())
341 - rnd = (long)get_random_int() % (1<<8);
342 + rnd = get_random_int() % (1<<8);
343 else
344 - rnd = (long)(get_random_int() % (1<<28));
345 + rnd = get_random_int() % (1<<28);
346 }
347 return rnd << PAGE_SHIFT;
348 }
349 diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
350 index 81dbfde..7efd0c6 100644
351 --- a/arch/x86/mm/srat.c
352 +++ b/arch/x86/mm/srat.c
353 @@ -104,6 +104,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
354 if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
355 return;
356 pxm = pa->proximity_domain_lo;
357 + if (acpi_srat_revision >= 2)
358 + pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8;
359 node = setup_node(pxm);
360 if (node < 0) {
361 printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
362 @@ -155,6 +157,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
363 start = ma->base_address;
364 end = start + ma->length;
365 pxm = ma->proximity_domain;
366 + if (acpi_srat_revision <= 1)
367 + pxm &= 0xff;
368 node = setup_node(pxm);
369 if (node < 0) {
370 printk(KERN_ERR "SRAT: Too many proximity domains.\n");
371 diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
372 index 6b8759f..d24d3da 100644
373 --- a/arch/x86/pci/Makefile
374 +++ b/arch/x86/pci/Makefile
375 @@ -18,8 +18,9 @@ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
376 obj-$(CONFIG_X86_MRST) += mrst.o
377
378 obj-y += common.o early.o
379 -obj-y += amd_bus.o bus_numa.o
380 +obj-y += bus_numa.o
381
382 +obj-$(CONFIG_AMD_NB) += amd_bus.o
383 obj-$(CONFIG_PCI_CNB20LE_QUIRK) += broadcom_bus.o
384
385 ifeq ($(CONFIG_PCI_DEBUG),y)
386 diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
387 index 404f21a..f8348ab 100644
388 --- a/arch/x86/pci/acpi.c
389 +++ b/arch/x86/pci/acpi.c
390 @@ -149,7 +149,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
391 struct acpi_resource_address64 addr;
392 acpi_status status;
393 unsigned long flags;
394 - u64 start, end;
395 + u64 start, orig_end, end;
396
397 status = resource_to_addr(acpi_res, &addr);
398 if (!ACPI_SUCCESS(status))
399 @@ -165,7 +165,21 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
400 return AE_OK;
401
402 start = addr.minimum + addr.translation_offset;
403 - end = addr.maximum + addr.translation_offset;
404 + orig_end = end = addr.maximum + addr.translation_offset;
405 +
406 + /* Exclude non-addressable range or non-addressable portion of range */
407 + end = min(end, (u64)iomem_resource.end);
408 + if (end <= start) {
409 + dev_info(&info->bridge->dev,
410 + "host bridge window [%#llx-%#llx] "
411 + "(ignored, not CPU addressable)\n", start, orig_end);
412 + return AE_OK;
413 + } else if (orig_end != end) {
414 + dev_info(&info->bridge->dev,
415 + "host bridge window [%#llx-%#llx] "
416 + "([%#llx-%#llx] ignored, not CPU addressable)\n",
417 + start, orig_end, end + 1, orig_end);
418 + }
419
420 res = &info->res[info->res_num];
421 res->name = info->name;
422 diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
423 index 026e493..385a940 100644
424 --- a/arch/x86/pci/amd_bus.c
425 +++ b/arch/x86/pci/amd_bus.c
426 @@ -30,34 +30,6 @@ static struct pci_hostbridge_probe pci_probes[] __initdata = {
427 { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 },
428 };
429
430 -static u64 __initdata fam10h_mmconf_start;
431 -static u64 __initdata fam10h_mmconf_end;
432 -static void __init get_pci_mmcfg_amd_fam10h_range(void)
433 -{
434 - u32 address;
435 - u64 base, msr;
436 - unsigned segn_busn_bits;
437 -
438 - /* assume all cpus from fam10h have mmconf */
439 - if (boot_cpu_data.x86 < 0x10)
440 - return;
441 -
442 - address = MSR_FAM10H_MMIO_CONF_BASE;
443 - rdmsrl(address, msr);
444 -
445 - /* mmconfig is not enable */
446 - if (!(msr & FAM10H_MMIO_CONF_ENABLE))
447 - return;
448 -
449 - base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
450 -
451 - segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
452 - FAM10H_MMIO_CONF_BUSRANGE_MASK;
453 -
454 - fam10h_mmconf_start = base;
455 - fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
456 -}
457 -
458 #define RANGE_NUM 16
459
460 /**
461 @@ -85,6 +57,9 @@ static int __init early_fill_mp_bus_info(void)
462 u64 val;
463 u32 address;
464 bool found;
465 + struct resource fam10h_mmconf_res, *fam10h_mmconf;
466 + u64 fam10h_mmconf_start;
467 + u64 fam10h_mmconf_end;
468
469 if (!early_pci_allowed())
470 return -1;
471 @@ -211,12 +186,17 @@ static int __init early_fill_mp_bus_info(void)
472 subtract_range(range, RANGE_NUM, 0, end);
473
474 /* get mmconfig */
475 - get_pci_mmcfg_amd_fam10h_range();
476 + fam10h_mmconf = amd_get_mmconfig_range(&fam10h_mmconf_res);
477 /* need to take out mmconf range */
478 - if (fam10h_mmconf_end) {
479 - printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end);
480 + if (fam10h_mmconf) {
481 + printk(KERN_DEBUG "Fam 10h mmconf %pR\n", fam10h_mmconf);
482 + fam10h_mmconf_start = fam10h_mmconf->start;
483 + fam10h_mmconf_end = fam10h_mmconf->end;
484 subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
485 fam10h_mmconf_end + 1);
486 + } else {
487 + fam10h_mmconf_start = 0;
488 + fam10h_mmconf_end = 0;
489 }
490
491 /* mmio resource */
492 diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
493 index 5b55219..9010ca7 100644
494 --- a/arch/x86/platform/uv/tlb_uv.c
495 +++ b/arch/x86/platform/uv/tlb_uv.c
496 @@ -157,13 +157,14 @@ static int __init uvhub_to_first_apicid(int uvhub)
497 * clear of the Timeout bit (as well) will free the resource. No reply will
498 * be sent (the hardware will only do one reply per message).
499 */
500 -static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
501 +static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
502 + int do_acknowledge)
503 {
504 unsigned long dw;
505 struct bau_pq_entry *msg;
506
507 msg = mdp->msg;
508 - if (!msg->canceled) {
509 + if (!msg->canceled && do_acknowledge) {
510 dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
511 write_mmr_sw_ack(dw);
512 }
513 @@ -212,8 +213,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
514 if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
515 unsigned long mr;
516 /*
517 - * is the resource timed out?
518 - * make everyone ignore the cancelled message.
519 + * Is the resource timed out?
520 + * Make everyone ignore the cancelled message.
521 */
522 msg2->canceled = 1;
523 stat->d_canceled++;
524 @@ -231,8 +232,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
525 * Do all the things a cpu should do for a TLB shootdown message.
526 * Other cpu's may come here at the same time for this message.
527 */
528 -static void bau_process_message(struct msg_desc *mdp,
529 - struct bau_control *bcp)
530 +static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
531 + int do_acknowledge)
532 {
533 short socket_ack_count = 0;
534 short *sp;
535 @@ -284,8 +285,9 @@ static void bau_process_message(struct msg_desc *mdp,
536 if (msg_ack_count == bcp->cpus_in_uvhub) {
537 /*
538 * All cpus in uvhub saw it; reply
539 + * (unless we are in the UV2 workaround)
540 */
541 - reply_to_message(mdp, bcp);
542 + reply_to_message(mdp, bcp, do_acknowledge);
543 }
544 }
545
546 @@ -491,27 +493,138 @@ static int uv1_wait_completion(struct bau_desc *bau_desc,
547 /*
548 * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
549 */
550 -static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu)
551 +static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
552 {
553 unsigned long descriptor_status;
554 unsigned long descriptor_status2;
555
556 descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
557 - descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL;
558 + descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
559 descriptor_status = (descriptor_status << 1) | descriptor_status2;
560 return descriptor_status;
561 }
562
563 +/*
564 + * Return whether the status of the descriptor that is normally used for this
565 + * cpu (the one indexed by its hub-relative cpu number) is busy.
566 + * The status of the original 32 descriptors is always reflected in the 64
567 + * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
568 + * The bit provided by the activation_status_2 register is irrelevant to
569 + * the status if it is only being tested for busy or not busy.
570 + */
571 +int normal_busy(struct bau_control *bcp)
572 +{
573 + int cpu = bcp->uvhub_cpu;
574 + int mmr_offset;
575 + int right_shift;
576 +
577 + mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
578 + right_shift = cpu * UV_ACT_STATUS_SIZE;
579 + return (((((read_lmmr(mmr_offset) >> right_shift) &
580 + UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
581 +}
582 +
583 +/*
584 + * Entered when a bau descriptor has gone into a permanent busy wait because
585 + * of a hardware bug.
586 + * Workaround the bug.
587 + */
588 +int handle_uv2_busy(struct bau_control *bcp)
589 +{
590 + int busy_one = bcp->using_desc;
591 + int normal = bcp->uvhub_cpu;
592 + int selected = -1;
593 + int i;
594 + unsigned long descriptor_status;
595 + unsigned long status;
596 + int mmr_offset;
597 + struct bau_desc *bau_desc_old;
598 + struct bau_desc *bau_desc_new;
599 + struct bau_control *hmaster = bcp->uvhub_master;
600 + struct ptc_stats *stat = bcp->statp;
601 + cycles_t ttm;
602 +
603 + stat->s_uv2_wars++;
604 + spin_lock(&hmaster->uvhub_lock);
605 + /* try for the original first */
606 + if (busy_one != normal) {
607 + if (!normal_busy(bcp))
608 + selected = normal;
609 + }
610 + if (selected < 0) {
611 + /* can't use the normal, select an alternate */
612 + mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
613 + descriptor_status = read_lmmr(mmr_offset);
614 +
615 + /* scan available descriptors 32-63 */
616 + for (i = 0; i < UV_CPUS_PER_AS; i++) {
617 + if ((hmaster->inuse_map & (1 << i)) == 0) {
618 + status = ((descriptor_status >>
619 + (i * UV_ACT_STATUS_SIZE)) &
620 + UV_ACT_STATUS_MASK) << 1;
621 + if (status != UV2H_DESC_BUSY) {
622 + selected = i + UV_CPUS_PER_AS;
623 + break;
624 + }
625 + }
626 + }
627 + }
628 +
629 + if (busy_one != normal)
630 + /* mark the busy alternate as not in-use */
631 + hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));
632 +
633 + if (selected >= 0) {
634 + /* switch to the selected descriptor */
635 + if (selected != normal) {
636 + /* set the selected alternate as in-use */
637 + hmaster->inuse_map |=
638 + (1 << (selected - UV_CPUS_PER_AS));
639 + if (selected > stat->s_uv2_wars_hw)
640 + stat->s_uv2_wars_hw = selected;
641 + }
642 + bau_desc_old = bcp->descriptor_base;
643 + bau_desc_old += (ITEMS_PER_DESC * busy_one);
644 + bcp->using_desc = selected;
645 + bau_desc_new = bcp->descriptor_base;
646 + bau_desc_new += (ITEMS_PER_DESC * selected);
647 + *bau_desc_new = *bau_desc_old;
648 + } else {
649 + /*
650 + * All are busy. Wait for the normal one for this cpu to
651 + * free up.
652 + */
653 + stat->s_uv2_war_waits++;
654 + spin_unlock(&hmaster->uvhub_lock);
655 + ttm = get_cycles();
656 + do {
657 + cpu_relax();
658 + } while (normal_busy(bcp));
659 + spin_lock(&hmaster->uvhub_lock);
660 + /* switch to the original descriptor */
661 + bcp->using_desc = normal;
662 + bau_desc_old = bcp->descriptor_base;
663 + bau_desc_old += (ITEMS_PER_DESC * bcp->using_desc);
664 + bcp->using_desc = (ITEMS_PER_DESC * normal);
665 + bau_desc_new = bcp->descriptor_base;
666 + bau_desc_new += (ITEMS_PER_DESC * normal);
667 + *bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
668 + }
669 + spin_unlock(&hmaster->uvhub_lock);
670 + return FLUSH_RETRY_BUSYBUG;
671 +}
672 +
673 static int uv2_wait_completion(struct bau_desc *bau_desc,
674 unsigned long mmr_offset, int right_shift,
675 struct bau_control *bcp, long try)
676 {
677 unsigned long descriptor_stat;
678 cycles_t ttm;
679 - int cpu = bcp->uvhub_cpu;
680 + int desc = bcp->using_desc;
681 + long busy_reps = 0;
682 struct ptc_stats *stat = bcp->statp;
683
684 - descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
685 + descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);
686
687 /* spin on the status MMR, waiting for it to go idle */
688 while (descriptor_stat != UV2H_DESC_IDLE) {
689 @@ -542,12 +655,23 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
690 bcp->conseccompletes = 0;
691 return FLUSH_RETRY_TIMEOUT;
692 } else {
693 + busy_reps++;
694 + if (busy_reps > 1000000) {
695 + /* not to hammer on the clock */
696 + busy_reps = 0;
697 + ttm = get_cycles();
698 + if ((ttm - bcp->send_message) >
699 + (bcp->clocks_per_100_usec)) {
700 + return handle_uv2_busy(bcp);
701 + }
702 + }
703 /*
704 * descriptor_stat is still BUSY
705 */
706 cpu_relax();
707 }
708 - descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
709 + descriptor_stat = uv2_read_status(mmr_offset, right_shift,
710 + desc);
711 }
712 bcp->conseccompletes++;
713 return FLUSH_COMPLETE;
714 @@ -563,17 +687,17 @@ static int wait_completion(struct bau_desc *bau_desc,
715 {
716 int right_shift;
717 unsigned long mmr_offset;
718 - int cpu = bcp->uvhub_cpu;
719 + int desc = bcp->using_desc;
720
721 - if (cpu < UV_CPUS_PER_AS) {
722 + if (desc < UV_CPUS_PER_AS) {
723 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
724 - right_shift = cpu * UV_ACT_STATUS_SIZE;
725 + right_shift = desc * UV_ACT_STATUS_SIZE;
726 } else {
727 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
728 - right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
729 + right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
730 }
731
732 - if (is_uv1_hub())
733 + if (bcp->uvhub_version == 1)
734 return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
735 bcp, try);
736 else
737 @@ -752,19 +876,22 @@ static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
738 * Returns 1 if it gives up entirely and the original cpu mask is to be
739 * returned to the kernel.
740 */
741 -int uv_flush_send_and_wait(struct bau_desc *bau_desc,
742 - struct cpumask *flush_mask, struct bau_control *bcp)
743 +int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
744 {
745 int seq_number = 0;
746 int completion_stat = 0;
747 + int uv1 = 0;
748 long try = 0;
749 unsigned long index;
750 cycles_t time1;
751 cycles_t time2;
752 struct ptc_stats *stat = bcp->statp;
753 struct bau_control *hmaster = bcp->uvhub_master;
754 + struct uv1_bau_msg_header *uv1_hdr = NULL;
755 + struct uv2_bau_msg_header *uv2_hdr = NULL;
756 + struct bau_desc *bau_desc;
757
758 - if (is_uv1_hub())
759 + if (bcp->uvhub_version == 1)
760 uv1_throttle(hmaster, stat);
761
762 while (hmaster->uvhub_quiesce)
763 @@ -772,22 +899,39 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
764
765 time1 = get_cycles();
766 do {
767 - if (try == 0) {
768 - bau_desc->header.msg_type = MSG_REGULAR;
769 + bau_desc = bcp->descriptor_base;
770 + bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
771 + if (bcp->uvhub_version == 1) {
772 + uv1 = 1;
773 + uv1_hdr = &bau_desc->header.uv1_hdr;
774 + } else
775 + uv2_hdr = &bau_desc->header.uv2_hdr;
776 + if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
777 + if (uv1)
778 + uv1_hdr->msg_type = MSG_REGULAR;
779 + else
780 + uv2_hdr->msg_type = MSG_REGULAR;
781 seq_number = bcp->message_number++;
782 } else {
783 - bau_desc->header.msg_type = MSG_RETRY;
784 + if (uv1)
785 + uv1_hdr->msg_type = MSG_RETRY;
786 + else
787 + uv2_hdr->msg_type = MSG_RETRY;
788 stat->s_retry_messages++;
789 }
790
791 - bau_desc->header.sequence = seq_number;
792 - index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
793 + if (uv1)
794 + uv1_hdr->sequence = seq_number;
795 + else
796 + uv2_hdr->sequence = seq_number;
797 + index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
798 bcp->send_message = get_cycles();
799
800 write_mmr_activation(index);
801
802 try++;
803 completion_stat = wait_completion(bau_desc, bcp, try);
804 + /* UV2: wait_completion() may change the bcp->using_desc */
805
806 handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
807
808 @@ -798,6 +942,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
809 }
810 cpu_relax();
811 } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
812 + (completion_stat == FLUSH_RETRY_BUSYBUG) ||
813 (completion_stat == FLUSH_RETRY_TIMEOUT));
814
815 time2 = get_cycles();
816 @@ -812,6 +957,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
817 record_send_stats(time1, time2, bcp, stat, completion_stat, try);
818
819 if (completion_stat == FLUSH_GIVEUP)
820 + /* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
821 return 1;
822 return 0;
823 }
824 @@ -967,7 +1113,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
825 stat->s_ntargself++;
826
827 bau_desc = bcp->descriptor_base;
828 - bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu;
829 + bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
830 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
831 if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
832 return NULL;
833 @@ -980,13 +1126,86 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
834 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
835 * or 1 if it gave up and the original cpumask should be returned.
836 */
837 - if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
838 + if (!uv_flush_send_and_wait(flush_mask, bcp))
839 return NULL;
840 else
841 return cpumask;
842 }
843
844 /*
845 + * Search the message queue for any 'other' message with the same software
846 + * acknowledge resource bit vector.
847 + */
848 +struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
849 + struct bau_control *bcp, unsigned char swack_vec)
850 +{
851 + struct bau_pq_entry *msg_next = msg + 1;
852 +
853 + if (msg_next > bcp->queue_last)
854 + msg_next = bcp->queue_first;
855 + while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
856 + if (msg_next->swack_vec == swack_vec)
857 + return msg_next;
858 + msg_next++;
859 + if (msg_next > bcp->queue_last)
860 + msg_next = bcp->queue_first;
861 + }
862 + return NULL;
863 +}
864 +
865 +/*
866 + * UV2 needs to work around a bug in which an arriving message has not
867 + * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
868 + * Such a message must be ignored.
869 + */
870 +void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
871 +{
872 + unsigned long mmr_image;
873 + unsigned char swack_vec;
874 + struct bau_pq_entry *msg = mdp->msg;
875 + struct bau_pq_entry *other_msg;
876 +
877 + mmr_image = read_mmr_sw_ack();
878 + swack_vec = msg->swack_vec;
879 +
880 + if ((swack_vec & mmr_image) == 0) {
881 + /*
882 + * This message was assigned a swack resource, but no
883 + * reserved acknowlegment is pending.
884 + * The bug has prevented this message from setting the MMR.
885 + * And no other message has used the same sw_ack resource.
886 + * Do the requested shootdown but do not reply to the msg.
887 + * (the 0 means make no acknowledge)
888 + */
889 + bau_process_message(mdp, bcp, 0);
890 + return;
891 + }
892 +
893 + /*
894 + * Some message has set the MMR 'pending' bit; it might have been
895 + * another message. Look for that message.
896 + */
897 + other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
898 + if (other_msg) {
899 + /* There is another. Do not ack the current one. */
900 + bau_process_message(mdp, bcp, 0);
901 + /*
902 + * Let the natural processing of that message acknowledge
903 + * it. Don't get the processing of sw_ack's out of order.
904 + */
905 + return;
906 + }
907 +
908 + /*
909 + * There is no other message using this sw_ack, so it is safe to
910 + * acknowledge it.
911 + */
912 + bau_process_message(mdp, bcp, 1);
913 +
914 + return;
915 +}
916 +
917 +/*
918 * The BAU message interrupt comes here. (registered by set_intr_gate)
919 * See entry_64.S
920 *
921 @@ -1022,9 +1241,11 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
922 count++;
923
924 msgdesc.msg_slot = msg - msgdesc.queue_first;
925 - msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
926 msgdesc.msg = msg;
927 - bau_process_message(&msgdesc, bcp);
928 + if (bcp->uvhub_version == 2)
929 + process_uv2_message(&msgdesc, bcp);
930 + else
931 + bau_process_message(&msgdesc, bcp, 1);
932
933 msg++;
934 if (msg > msgdesc.queue_last)
935 @@ -1083,7 +1304,7 @@ static void __init enable_timeouts(void)
936 */
937 mmr_image |= (1L << SOFTACK_MSHIFT);
938 if (is_uv2_hub()) {
939 - mmr_image |= (1L << UV2_LEG_SHFT);
940 + mmr_image &= ~(1L << UV2_LEG_SHFT);
941 mmr_image |= (1L << UV2_EXT_SHFT);
942 }
943 write_mmr_misc_control(pnode, mmr_image);
944 @@ -1142,7 +1363,7 @@ static int ptc_seq_show(struct seq_file *file, void *data)
945 seq_printf(file,
946 "all one mult none retry canc nocan reset rcan ");
947 seq_printf(file,
948 - "disable enable\n");
949 + "disable enable wars warshw warwaits\n");
950 }
951 if (cpu < num_possible_cpus() && cpu_online(cpu)) {
952 stat = &per_cpu(ptcstats, cpu);
953 @@ -1173,8 +1394,10 @@ static int ptc_seq_show(struct seq_file *file, void *data)
954 stat->d_nomsg, stat->d_retries, stat->d_canceled,
955 stat->d_nocanceled, stat->d_resets,
956 stat->d_rcanceled);
957 - seq_printf(file, "%ld %ld\n",
958 - stat->s_bau_disabled, stat->s_bau_reenabled);
959 + seq_printf(file, "%ld %ld %ld %ld %ld\n",
960 + stat->s_bau_disabled, stat->s_bau_reenabled,
961 + stat->s_uv2_wars, stat->s_uv2_wars_hw,
962 + stat->s_uv2_war_waits);
963 }
964 return 0;
965 }
966 @@ -1432,12 +1655,15 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
967 {
968 int i;
969 int cpu;
970 + int uv1 = 0;
971 unsigned long gpa;
972 unsigned long m;
973 unsigned long n;
974 size_t dsize;
975 struct bau_desc *bau_desc;
976 struct bau_desc *bd2;
977 + struct uv1_bau_msg_header *uv1_hdr;
978 + struct uv2_bau_msg_header *uv2_hdr;
979 struct bau_control *bcp;
980
981 /*
982 @@ -1451,6 +1677,8 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
983 gpa = uv_gpa(bau_desc);
984 n = uv_gpa_to_gnode(gpa);
985 m = uv_gpa_to_offset(gpa);
986 + if (is_uv1_hub())
987 + uv1 = 1;
988
989 /* the 14-bit pnode */
990 write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
991 @@ -1461,21 +1689,33 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
992 */
993 for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
994 memset(bd2, 0, sizeof(struct bau_desc));
995 - bd2->header.swack_flag = 1;
996 - /*
997 - * The base_dest_nasid set in the message header is the nasid
998 - * of the first uvhub in the partition. The bit map will
999 - * indicate destination pnode numbers relative to that base.
1000 - * They may not be consecutive if nasid striding is being used.
1001 - */
1002 - bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
1003 - bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
1004 - bd2->header.command = UV_NET_ENDPOINT_INTD;
1005 - bd2->header.int_both = 1;
1006 - /*
1007 - * all others need to be set to zero:
1008 - * fairness chaining multilevel count replied_to
1009 - */
1010 + if (uv1) {
1011 + uv1_hdr = &bd2->header.uv1_hdr;
1012 + uv1_hdr->swack_flag = 1;
1013 + /*
1014 + * The base_dest_nasid set in the message header
1015 + * is the nasid of the first uvhub in the partition.
1016 + * The bit map will indicate destination pnode numbers
1017 + * relative to that base. They may not be consecutive
1018 + * if nasid striding is being used.
1019 + */
1020 + uv1_hdr->base_dest_nasid =
1021 + UV_PNODE_TO_NASID(base_pnode);
1022 + uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1023 + uv1_hdr->command = UV_NET_ENDPOINT_INTD;
1024 + uv1_hdr->int_both = 1;
1025 + /*
1026 + * all others need to be set to zero:
1027 + * fairness chaining multilevel count replied_to
1028 + */
1029 + } else {
1030 + uv2_hdr = &bd2->header.uv2_hdr;
1031 + uv2_hdr->swack_flag = 1;
1032 + uv2_hdr->base_dest_nasid =
1033 + UV_PNODE_TO_NASID(base_pnode);
1034 + uv2_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1035 + uv2_hdr->command = UV_NET_ENDPOINT_INTD;
1036 + }
1037 }
1038 for_each_present_cpu(cpu) {
1039 if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
1040 @@ -1531,6 +1771,7 @@ static void pq_init(int node, int pnode)
1041 write_mmr_payload_first(pnode, pn_first);
1042 write_mmr_payload_tail(pnode, first);
1043 write_mmr_payload_last(pnode, last);
1044 + write_gmmr_sw_ack(pnode, 0xffffUL);
1045
1046 /* in effect, all msg_type's are set to MSG_NOOP */
1047 memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
1048 @@ -1584,14 +1825,14 @@ static int calculate_destination_timeout(void)
1049 ts_ns = base * mult1 * mult2;
1050 ret = ts_ns / 1000;
1051 } else {
1052 - /* 4 bits 0/1 for 10/80us, 3 bits of multiplier */
1053 - mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
1054 + /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
1055 + mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
1056 mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
1057 if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
1058 - mult1 = 80;
1059 + base = 80;
1060 else
1061 - mult1 = 10;
1062 - base = mmr_image & UV2_ACK_MASK;
1063 + base = 10;
1064 + mult1 = mmr_image & UV2_ACK_MASK;
1065 ret = mult1 * base;
1066 }
1067 return ret;
1068 @@ -1618,6 +1859,7 @@ static void __init init_per_cpu_tunables(void)
1069 bcp->cong_response_us = congested_respns_us;
1070 bcp->cong_reps = congested_reps;
1071 bcp->cong_period = congested_period;
1072 + bcp->clocks_per_100_usec = usec_2_cycles(100);
1073 }
1074 }
1075
1076 @@ -1728,8 +1970,17 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
1077 bcp->cpus_in_socket = sdp->num_cpus;
1078 bcp->socket_master = *smasterp;
1079 bcp->uvhub = bdp->uvhub;
1080 + if (is_uv1_hub())
1081 + bcp->uvhub_version = 1;
1082 + else if (is_uv2_hub())
1083 + bcp->uvhub_version = 2;
1084 + else {
1085 + printk(KERN_EMERG "uvhub version not 1 or 2\n");
1086 + return 1;
1087 + }
1088 bcp->uvhub_master = *hmasterp;
1089 bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
1090 + bcp->using_desc = bcp->uvhub_cpu;
1091 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
1092 printk(KERN_EMERG "%d cpus per uvhub invalid\n",
1093 bcp->uvhub_cpu);
1094 @@ -1845,6 +2096,8 @@ static int __init uv_bau_init(void)
1095 uv_base_pnode = uv_blade_to_pnode(uvhub);
1096 }
1097
1098 + enable_timeouts();
1099 +
1100 if (init_per_cpu(nuvhubs, uv_base_pnode)) {
1101 nobau = 1;
1102 return 0;
1103 @@ -1855,7 +2108,6 @@ static int __init uv_bau_init(void)
1104 if (uv_blade_nr_possible_cpus(uvhub))
1105 init_uvhub(uvhub, vector, uv_base_pnode);
1106
1107 - enable_timeouts();
1108 alloc_intr_gate(vector, uv_bau_message_intr1);
1109
1110 for_each_possible_blade(uvhub) {
1111 @@ -1867,7 +2119,8 @@ static int __init uv_bau_init(void)
1112 val = 1L << 63;
1113 write_gmmr_activation(pnode, val);
1114 mmr = 1; /* should be 1 to broadcast to both sockets */
1115 - write_mmr_data_broadcast(pnode, mmr);
1116 + if (!is_uv1_hub())
1117 + write_mmr_data_broadcast(pnode, mmr);
1118 }
1119 }
1120
1121 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
1122 index fbdf0d8..688be8a 100644
1123 --- a/block/scsi_ioctl.c
1124 +++ b/block/scsi_ioctl.c
1125 @@ -24,6 +24,7 @@
1126 #include <linux/capability.h>
1127 #include <linux/completion.h>
1128 #include <linux/cdrom.h>
1129 +#include <linux/ratelimit.h>
1130 #include <linux/slab.h>
1131 #include <linux/times.h>
1132 #include <asm/uaccess.h>
1133 @@ -690,6 +691,57 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
1134 }
1135 EXPORT_SYMBOL(scsi_cmd_ioctl);
1136
1137 +int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
1138 +{
1139 + if (bd && bd == bd->bd_contains)
1140 + return 0;
1141 +
1142 + /* Actually none of these is particularly useful on a partition,
1143 + * but they are safe.
1144 + */
1145 + switch (cmd) {
1146 + case SCSI_IOCTL_GET_IDLUN:
1147 + case SCSI_IOCTL_GET_BUS_NUMBER:
1148 + case SCSI_IOCTL_GET_PCI:
1149 + case SCSI_IOCTL_PROBE_HOST:
1150 + case SG_GET_VERSION_NUM:
1151 + case SG_SET_TIMEOUT:
1152 + case SG_GET_TIMEOUT:
1153 + case SG_GET_RESERVED_SIZE:
1154 + case SG_SET_RESERVED_SIZE:
1155 + case SG_EMULATED_HOST:
1156 + return 0;
1157 + case CDROM_GET_CAPABILITY:
1158 + /* Keep this until we remove the printk below. udev sends it
1159 + * and we do not want to spam dmesg about it. CD-ROMs do
1160 + * not have partitions, so we get here only for disks.
1161 + */
1162 + return -ENOTTY;
1163 + default:
1164 + break;
1165 + }
1166 +
1167 + /* In particular, rule out all resets and host-specific ioctls. */
1168 + printk_ratelimited(KERN_WARNING
1169 + "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
1170 +
1171 + return capable(CAP_SYS_RAWIO) ? 0 : -ENOTTY;
1172 +}
1173 +EXPORT_SYMBOL(scsi_verify_blk_ioctl);
1174 +
1175 +int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
1176 + unsigned int cmd, void __user *arg)
1177 +{
1178 + int ret;
1179 +
1180 + ret = scsi_verify_blk_ioctl(bd, cmd);
1181 + if (ret < 0)
1182 + return ret;
1183 +
1184 + return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
1185 +}
1186 +EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
1187 +
1188 static int __init blk_scsi_ioctl_init(void)
1189 {
1190 blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
1191 diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
1192 index 8c7b997..42163d8 100644
1193 --- a/drivers/acpi/acpica/dsargs.c
1194 +++ b/drivers/acpi/acpica/dsargs.c
1195 @@ -387,5 +387,29 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
1196 status = acpi_ds_execute_arguments(node, node->parent,
1197 extra_desc->extra.aml_length,
1198 extra_desc->extra.aml_start);
1199 + if (ACPI_FAILURE(status)) {
1200 + return_ACPI_STATUS(status);
1201 + }
1202 +
1203 + /* Validate the region address/length via the host OS */
1204 +
1205 + status = acpi_os_validate_address(obj_desc->region.space_id,
1206 + obj_desc->region.address,
1207 + (acpi_size) obj_desc->region.length,
1208 + acpi_ut_get_node_name(node));
1209 +
1210 + if (ACPI_FAILURE(status)) {
1211 + /*
1212 + * Invalid address/length. We will emit an error message and mark
1213 + * the region as invalid, so that it will cause an additional error if
1214 + * it is ever used. Then return AE_OK.
1215 + */
1216 + ACPI_EXCEPTION((AE_INFO, status,
1217 + "During address validation of OpRegion [%4.4s]",
1218 + node->name.ascii));
1219 + obj_desc->common.flags |= AOPOBJ_INVALID;
1220 + status = AE_OK;
1221 + }
1222 +
1223 return_ACPI_STATUS(status);
1224 }
1225 diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
1226 index 3b5c318..e56f3be 100644
1227 --- a/drivers/acpi/numa.c
1228 +++ b/drivers/acpi/numa.c
1229 @@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS]
1230 static int node_to_pxm_map[MAX_NUMNODES]
1231 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
1232
1233 +unsigned char acpi_srat_revision __initdata;
1234 +
1235 int pxm_to_node(int pxm)
1236 {
1237 if (pxm < 0)
1238 @@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
1239
1240 static int __init acpi_parse_srat(struct acpi_table_header *table)
1241 {
1242 + struct acpi_table_srat *srat;
1243 if (!table)
1244 return -EINVAL;
1245
1246 + srat = (struct acpi_table_srat *)table;
1247 + acpi_srat_revision = srat->header.revision;
1248 +
1249 /* Real work done in acpi_table_parse_srat below. */
1250
1251 return 0;
1252 diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
1253 index 3a0428e..c850de4 100644
1254 --- a/drivers/acpi/processor_core.c
1255 +++ b/drivers/acpi/processor_core.c
1256 @@ -173,8 +173,30 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
1257 apic_id = map_mat_entry(handle, type, acpi_id);
1258 if (apic_id == -1)
1259 apic_id = map_madt_entry(type, acpi_id);
1260 - if (apic_id == -1)
1261 - return apic_id;
1262 + if (apic_id == -1) {
1263 + /*
1264 + * On UP processor, there is no _MAT or MADT table.
1265 + * So above apic_id is always set to -1.
1266 + *
1267 + * BIOS may define multiple CPU handles even for UP processor.
1268 + * For example,
1269 + *
1270 + * Scope (_PR)
1271 + * {
1272 + * Processor (CPU0, 0x00, 0x00000410, 0x06) {}
1273 + * Processor (CPU1, 0x01, 0x00000410, 0x06) {}
1274 + * Processor (CPU2, 0x02, 0x00000410, 0x06) {}
1275 + * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
1276 + * }
1277 + *
1278 + * Ignores apic_id and always return 0 for CPU0's handle.
1279 + * Return -1 for other CPU's handle.
1280 + */
1281 + if (acpi_id == 0)
1282 + return acpi_id;
1283 + else
1284 + return apic_id;
1285 + }
1286
1287 #ifdef CONFIG_SMP
1288 for_each_possible_cpu(i) {
1289 diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
1290 index 990f5a8..48e06be 100644
1291 --- a/drivers/bcma/host_pci.c
1292 +++ b/drivers/bcma/host_pci.c
1293 @@ -227,11 +227,14 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
1294 #ifdef CONFIG_PM
1295 static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state)
1296 {
1297 + struct bcma_bus *bus = pci_get_drvdata(dev);
1298 +
1299 /* Host specific */
1300 pci_save_state(dev);
1301 pci_disable_device(dev);
1302 pci_set_power_state(dev, pci_choose_state(dev, state));
1303
1304 + bus->mapped_core = NULL;
1305 return 0;
1306 }
1307
1308 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
1309 index 587cce5..b0f553b 100644
1310 --- a/drivers/block/cciss.c
1311 +++ b/drivers/block/cciss.c
1312 @@ -1735,7 +1735,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
1313 case CCISS_BIG_PASSTHRU:
1314 return cciss_bigpassthru(h, argp);
1315
1316 - /* scsi_cmd_ioctl handles these, below, though some are not */
1317 + /* scsi_cmd_blk_ioctl handles these, below, though some are not */
1318 /* very meaningful for cciss. SG_IO is the main one people want. */
1319
1320 case SG_GET_VERSION_NUM:
1321 @@ -1746,9 +1746,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
1322 case SG_EMULATED_HOST:
1323 case SG_IO:
1324 case SCSI_IOCTL_SEND_COMMAND:
1325 - return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
1326 + return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
1327
1328 - /* scsi_cmd_ioctl would normally handle these, below, but */
1329 + /* scsi_cmd_blk_ioctl would normally handle these, below, but */
1330 /* they aren't a good fit for cciss, as CD-ROMs are */
1331 /* not supported, and we don't have any bus/target/lun */
1332 /* which we present to the kernel. */
1333 diff --git a/drivers/block/ub.c b/drivers/block/ub.c
1334 index 0e376d4..7333b9e 100644
1335 --- a/drivers/block/ub.c
1336 +++ b/drivers/block/ub.c
1337 @@ -1744,12 +1744,11 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode)
1338 static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
1339 unsigned int cmd, unsigned long arg)
1340 {
1341 - struct gendisk *disk = bdev->bd_disk;
1342 void __user *usermem = (void __user *) arg;
1343 int ret;
1344
1345 mutex_lock(&ub_mutex);
1346 - ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
1347 + ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, usermem);
1348 mutex_unlock(&ub_mutex);
1349
1350 return ret;
1351 diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
1352 index 4d0b70a..e46f2f7 100644
1353 --- a/drivers/block/virtio_blk.c
1354 +++ b/drivers/block/virtio_blk.c
1355 @@ -243,8 +243,8 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
1356 if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
1357 return -ENOTTY;
1358
1359 - return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
1360 - (void __user *)data);
1361 + return scsi_cmd_blk_ioctl(bdev, mode, cmd,
1362 + (void __user *)data);
1363 }
1364
1365 /* We provide getgeo only to please some old bootloader/partitioning tools */
1366 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
1367 index f997c27..cedb231 100644
1368 --- a/drivers/cdrom/cdrom.c
1369 +++ b/drivers/cdrom/cdrom.c
1370 @@ -2747,12 +2747,11 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
1371 {
1372 void __user *argp = (void __user *)arg;
1373 int ret;
1374 - struct gendisk *disk = bdev->bd_disk;
1375
1376 /*
1377 * Try the generic SCSI command ioctl's first.
1378 */
1379 - ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
1380 + ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
1381 if (ret != -ENOTTY)
1382 return ret;
1383
1384 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
1385 index bfc08f6..31b0d1a 100644
1386 --- a/drivers/gpu/drm/radeon/r100.c
1387 +++ b/drivers/gpu/drm/radeon/r100.c
1388 @@ -2177,6 +2177,7 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev)
1389 void r100_bm_disable(struct radeon_device *rdev)
1390 {
1391 u32 tmp;
1392 + u16 tmp16;
1393
1394 /* disable bus mastering */
1395 tmp = RREG32(R_000030_BUS_CNTL);
1396 @@ -2187,8 +2188,8 @@ void r100_bm_disable(struct radeon_device *rdev)
1397 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
1398 tmp = RREG32(RADEON_BUS_CNTL);
1399 mdelay(1);
1400 - pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
1401 - pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
1402 + pci_read_config_word(rdev->pdev, 0x4, &tmp16);
1403 + pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB);
1404 mdelay(1);
1405 }
1406
1407 diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
1408 index f5ac7e7..c45d921 100644
1409 --- a/drivers/gpu/drm/radeon/r600_hdmi.c
1410 +++ b/drivers/gpu/drm/radeon/r600_hdmi.c
1411 @@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe(
1412 frame[0xD] = (right_bar >> 8);
1413
1414 r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
1415 + /* Our header values (type, version, length) should be alright, Intel
1416 + * is using the same. Checksum function also seems to be OK, it works
1417 + * fine for audio infoframe. However calculated value is always lower
1418 + * by 2 in comparison to fglrx. It breaks displaying anything in case
1419 + * of TVs that strictly check the checksum. Hack it manually here to
1420 + * workaround this issue. */
1421 + frame[0x0] += 2;
1422
1423 WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
1424 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1425 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
1426 index c4d00a1..9b39145 100644
1427 --- a/drivers/gpu/drm/radeon/radeon_device.c
1428 +++ b/drivers/gpu/drm/radeon/radeon_device.c
1429 @@ -224,8 +224,11 @@ int radeon_wb_init(struct radeon_device *rdev)
1430 if (radeon_no_wb == 1)
1431 rdev->wb.enabled = false;
1432 else {
1433 - /* often unreliable on AGP */
1434 if (rdev->flags & RADEON_IS_AGP) {
1435 + /* often unreliable on AGP */
1436 + rdev->wb.enabled = false;
1437 + } else if (rdev->family < CHIP_R300) {
1438 + /* often unreliable on pre-r300 */
1439 rdev->wb.enabled = false;
1440 } else {
1441 rdev->wb.enabled = true;
1442 diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
1443 index b1053d6..c259e21 100644
1444 --- a/drivers/gpu/drm/radeon/rs600.c
1445 +++ b/drivers/gpu/drm/radeon/rs600.c
1446 @@ -324,10 +324,10 @@ void rs600_hpd_fini(struct radeon_device *rdev)
1447
1448 void rs600_bm_disable(struct radeon_device *rdev)
1449 {
1450 - u32 tmp;
1451 + u16 tmp;
1452
1453 /* disable bus mastering */
1454 - pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
1455 + pci_read_config_word(rdev->pdev, 0x4, &tmp);
1456 pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
1457 mdelay(1);
1458 }
1459 diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
1460 index 22a4a05..d21f6d0 100644
1461 --- a/drivers/hid/Kconfig
1462 +++ b/drivers/hid/Kconfig
1463 @@ -335,6 +335,7 @@ config HID_MULTITOUCH
1464 Say Y here if you have one of the following devices:
1465 - 3M PCT touch screens
1466 - ActionStar dual touch panels
1467 + - Atmel panels
1468 - Cando dual touch panels
1469 - Chunghwa panels
1470 - CVTouch panels
1471 @@ -355,6 +356,7 @@ config HID_MULTITOUCH
1472 - Touch International Panels
1473 - Unitec Panels
1474 - XAT optical touch panels
1475 + - Xiroku optical touch panels
1476
1477 If unsure, say N.
1478
1479 @@ -620,6 +622,7 @@ config HID_WIIMOTE
1480 depends on BT_HIDP
1481 depends on LEDS_CLASS
1482 select POWER_SUPPLY
1483 + select INPUT_FF_MEMLESS
1484 ---help---
1485 Support for the Nintendo Wii Remote bluetooth device.
1486
1487 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1488 index af35384..bb656d8 100644
1489 --- a/drivers/hid/hid-core.c
1490 +++ b/drivers/hid/hid-core.c
1491 @@ -362,7 +362,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
1492
1493 case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
1494 parser->global.report_size = item_udata(item);
1495 - if (parser->global.report_size > 32) {
1496 + if (parser->global.report_size > 96) {
1497 dbg_hid("invalid report_size %d\n",
1498 parser->global.report_size);
1499 return -1;
1500 @@ -1404,11 +1404,13 @@ static const struct hid_device_id hid_have_special_driver[] = {
1501 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
1502 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
1503 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
1504 - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
1505 - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
1506 - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
1507 - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
1508 - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
1509 + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
1510 + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
1511 + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
1512 + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
1513 + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
1514 + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
1515 + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
1516 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
1517 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2515) },
1518 { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
1519 @@ -1423,6 +1425,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1520 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
1521 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
1522 { HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
1523 + { HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT, USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
1524 { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6650) },
1525 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
1526 { HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },
1527 @@ -1549,6 +1552,15 @@ static const struct hid_device_id hid_have_special_driver[] = {
1528 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
1529 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
1530 { HID_USB_DEVICE(USB_VENDOR_ID_XAT, USB_DEVICE_ID_XAT_CSR) },
1531 + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX) },
1532 + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX) },
1533 + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR) },
1534 + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX1) },
1535 + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX1) },
1536 + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR1) },
1537 + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX2) },
1538 + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX2) },
1539 + { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR2) },
1540 { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
1541 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
1542 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
1543 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1544 index 4a441a6..00cabb3 100644
1545 --- a/drivers/hid/hid-ids.h
1546 +++ b/drivers/hid/hid-ids.h
1547 @@ -21,6 +21,7 @@
1548 #define USB_VENDOR_ID_3M 0x0596
1549 #define USB_DEVICE_ID_3M1968 0x0500
1550 #define USB_DEVICE_ID_3M2256 0x0502
1551 +#define USB_DEVICE_ID_3M3266 0x0506
1552
1553 #define USB_VENDOR_ID_A4TECH 0x09da
1554 #define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
1555 @@ -145,6 +146,9 @@
1556 #define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
1557 #define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
1558
1559 +#define USB_VENDOR_ID_ATMEL 0x03eb
1560 +#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
1561 +
1562 #define USB_VENDOR_ID_AVERMEDIA 0x07ca
1563 #define USB_DEVICE_ID_AVER_FM_MR800 0xb800
1564
1565 @@ -230,11 +234,14 @@
1566
1567 #define USB_VENDOR_ID_DWAV 0x0eef
1568 #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
1569 -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
1570 -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1 0x720c
1571 -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2 0x72a1
1572 -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3 0x480e
1573 -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4 0x726b
1574 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D 0x480d
1575 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E 0x480e
1576 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C 0x720c
1577 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B 0x726b
1578 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1 0x72a1
1579 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA 0x72fa
1580 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302 0x7302
1581 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
1582
1583 #define USB_VENDOR_ID_ELECOM 0x056e
1584 #define USB_DEVICE_ID_ELECOM_BM084 0x0061
1585 @@ -356,6 +363,9 @@
1586 #define USB_VENDOR_ID_HANVON 0x20b3
1587 #define USB_DEVICE_ID_HANVON_MULTITOUCH 0x0a18
1588
1589 +#define USB_VENDOR_ID_HANVON_ALT 0x22ed
1590 +#define USB_DEVICE_ID_HANVON_ALT_MULTITOUCH 0x1010
1591 +
1592 #define USB_VENDOR_ID_HAPP 0x078b
1593 #define USB_DEVICE_ID_UGCI_DRIVING 0x0010
1594 #define USB_DEVICE_ID_UGCI_FLYING 0x0020
1595 @@ -707,6 +717,17 @@
1596 #define USB_VENDOR_ID_XAT 0x2505
1597 #define USB_DEVICE_ID_XAT_CSR 0x0220
1598
1599 +#define USB_VENDOR_ID_XIROKU 0x1477
1600 +#define USB_DEVICE_ID_XIROKU_SPX 0x1006
1601 +#define USB_DEVICE_ID_XIROKU_MPX 0x1007
1602 +#define USB_DEVICE_ID_XIROKU_CSR 0x100e
1603 +#define USB_DEVICE_ID_XIROKU_SPX1 0x1021
1604 +#define USB_DEVICE_ID_XIROKU_CSR1 0x1022
1605 +#define USB_DEVICE_ID_XIROKU_MPX1 0x1023
1606 +#define USB_DEVICE_ID_XIROKU_SPX2 0x1024
1607 +#define USB_DEVICE_ID_XIROKU_CSR2 0x1025
1608 +#define USB_DEVICE_ID_XIROKU_MPX2 0x1026
1609 +
1610 #define USB_VENDOR_ID_YEALINK 0x6993
1611 #define USB_DEVICE_ID_YEALINK_P1K_P4K_B2K 0xb001
1612
1613 diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
1614 index f1c909f..995fc4c 100644
1615 --- a/drivers/hid/hid-multitouch.c
1616 +++ b/drivers/hid/hid-multitouch.c
1617 @@ -609,12 +609,20 @@ static const struct hid_device_id mt_devices[] = {
1618 { .driver_data = MT_CLS_3M,
1619 HID_USB_DEVICE(USB_VENDOR_ID_3M,
1620 USB_DEVICE_ID_3M2256) },
1621 + { .driver_data = MT_CLS_3M,
1622 + HID_USB_DEVICE(USB_VENDOR_ID_3M,
1623 + USB_DEVICE_ID_3M3266) },
1624
1625 /* ActionStar panels */
1626 { .driver_data = MT_CLS_DEFAULT,
1627 HID_USB_DEVICE(USB_VENDOR_ID_ACTIONSTAR,
1628 USB_DEVICE_ID_ACTIONSTAR_1011) },
1629
1630 + /* Atmel panels */
1631 + { .driver_data = MT_CLS_SERIAL,
1632 + HID_USB_DEVICE(USB_VENDOR_ID_ATMEL,
1633 + USB_DEVICE_ID_ATMEL_MULTITOUCH) },
1634 +
1635 /* Cando panels */
1636 { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
1637 HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
1638 @@ -645,23 +653,32 @@ static const struct hid_device_id mt_devices[] = {
1639 USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
1640
1641 /* eGalax devices (resistive) */
1642 - { .driver_data = MT_CLS_EGALAX,
1643 + { .driver_data = MT_CLS_EGALAX,
1644 HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1645 - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
1646 - { .driver_data = MT_CLS_EGALAX,
1647 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
1648 + { .driver_data = MT_CLS_EGALAX,
1649 HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1650 - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
1651 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
1652
1653 /* eGalax devices (capacitive) */
1654 - { .driver_data = MT_CLS_EGALAX,
1655 + { .driver_data = MT_CLS_EGALAX,
1656 + HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1657 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
1658 + { .driver_data = MT_CLS_EGALAX,
1659 HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1660 - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
1661 - { .driver_data = MT_CLS_EGALAX,
1662 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
1663 + { .driver_data = MT_CLS_EGALAX,
1664 HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1665 - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
1666 - { .driver_data = MT_CLS_EGALAX,
1667 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
1668 + { .driver_data = MT_CLS_EGALAX,
1669 HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1670 - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
1671 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA) },
1672 + { .driver_data = MT_CLS_EGALAX,
1673 + HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1674 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
1675 + { .driver_data = MT_CLS_EGALAX,
1676 + HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1677 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
1678
1679 /* Elo TouchSystems IntelliTouch Plus panel */
1680 { .driver_data = MT_CLS_DUAL_NSMU_CONTACTID,
1681 @@ -678,6 +695,11 @@ static const struct hid_device_id mt_devices[] = {
1682 HID_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
1683 USB_DEVICE_ID_GOODTOUCH_000f) },
1684
1685 + /* Hanvon panels */
1686 + { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
1687 + HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
1688 + USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
1689 +
1690 /* Ideacom panel */
1691 { .driver_data = MT_CLS_SERIAL,
1692 HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
1693 @@ -758,6 +780,35 @@ static const struct hid_device_id mt_devices[] = {
1694 HID_USB_DEVICE(USB_VENDOR_ID_XAT,
1695 USB_DEVICE_ID_XAT_CSR) },
1696
1697 + /* Xiroku */
1698 + { .driver_data = MT_CLS_DEFAULT,
1699 + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1700 + USB_DEVICE_ID_XIROKU_SPX) },
1701 + { .driver_data = MT_CLS_DEFAULT,
1702 + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1703 + USB_DEVICE_ID_XIROKU_MPX) },
1704 + { .driver_data = MT_CLS_DEFAULT,
1705 + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1706 + USB_DEVICE_ID_XIROKU_CSR) },
1707 + { .driver_data = MT_CLS_DEFAULT,
1708 + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1709 + USB_DEVICE_ID_XIROKU_SPX1) },
1710 + { .driver_data = MT_CLS_DEFAULT,
1711 + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1712 + USB_DEVICE_ID_XIROKU_MPX1) },
1713 + { .driver_data = MT_CLS_DEFAULT,
1714 + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1715 + USB_DEVICE_ID_XIROKU_CSR1) },
1716 + { .driver_data = MT_CLS_DEFAULT,
1717 + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1718 + USB_DEVICE_ID_XIROKU_SPX2) },
1719 + { .driver_data = MT_CLS_DEFAULT,
1720 + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1721 + USB_DEVICE_ID_XIROKU_MPX2) },
1722 + { .driver_data = MT_CLS_DEFAULT,
1723 + HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1724 + USB_DEVICE_ID_XIROKU_CSR2) },
1725 +
1726 { }
1727 };
1728 MODULE_DEVICE_TABLE(hid, mt_devices);
1729 diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
1730 index b6807db..5b667e5 100644
1731 --- a/drivers/i2c/busses/i2c-ali1535.c
1732 +++ b/drivers/i2c/busses/i2c-ali1535.c
1733 @@ -140,7 +140,7 @@ static unsigned short ali1535_smba;
1734 defined to make the transition easier. */
1735 static int __devinit ali1535_setup(struct pci_dev *dev)
1736 {
1737 - int retval = -ENODEV;
1738 + int retval;
1739 unsigned char temp;
1740
1741 /* Check the following things:
1742 @@ -155,6 +155,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
1743 if (ali1535_smba == 0) {
1744 dev_warn(&dev->dev,
1745 "ALI1535_smb region uninitialized - upgrade BIOS?\n");
1746 + retval = -ENODEV;
1747 goto exit;
1748 }
1749
1750 @@ -167,6 +168,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
1751 ali1535_driver.name)) {
1752 dev_err(&dev->dev, "ALI1535_smb region 0x%x already in use!\n",
1753 ali1535_smba);
1754 + retval = -EBUSY;
1755 goto exit;
1756 }
1757
1758 @@ -174,6 +176,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
1759 pci_read_config_byte(dev, SMBCFG, &temp);
1760 if ((temp & ALI1535_SMBIO_EN) == 0) {
1761 dev_err(&dev->dev, "SMB device not enabled - upgrade BIOS?\n");
1762 + retval = -ENODEV;
1763 goto exit_free;
1764 }
1765
1766 @@ -181,6 +184,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
1767 pci_read_config_byte(dev, SMBHSTCFG, &temp);
1768 if ((temp & 1) == 0) {
1769 dev_err(&dev->dev, "SMBus controller not enabled - upgrade BIOS?\n");
1770 + retval = -ENODEV;
1771 goto exit_free;
1772 }
1773
1774 @@ -198,12 +202,11 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
1775 dev_dbg(&dev->dev, "SMBREV = 0x%X\n", temp);
1776 dev_dbg(&dev->dev, "ALI1535_smba = 0x%X\n", ali1535_smba);
1777
1778 - retval = 0;
1779 -exit:
1780 - return retval;
1781 + return 0;
1782
1783 exit_free:
1784 release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
1785 +exit:
1786 return retval;
1787 }
1788
1789 diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
1790 index 18936ac..730215e 100644
1791 --- a/drivers/i2c/busses/i2c-eg20t.c
1792 +++ b/drivers/i2c/busses/i2c-eg20t.c
1793 @@ -243,7 +243,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
1794 if (pch_clk > PCH_MAX_CLK)
1795 pch_clk = 62500;
1796
1797 - pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
1798 + pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
1799 /* Set transfer speed in I2CBC */
1800 iowrite32(pch_i2cbc, p + PCH_I2CBC);
1801
1802 diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
1803 index ff1e127..4853b52 100644
1804 --- a/drivers/i2c/busses/i2c-nforce2.c
1805 +++ b/drivers/i2c/busses/i2c-nforce2.c
1806 @@ -356,7 +356,7 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
1807 error = acpi_check_region(smbus->base, smbus->size,
1808 nforce2_driver.name);
1809 if (error)
1810 - return -1;
1811 + return error;
1812
1813 if (!request_region(smbus->base, smbus->size, nforce2_driver.name)) {
1814 dev_err(&smbus->adapter.dev, "Error requesting region %02x .. %02X for %s\n",
1815 diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
1816 index fa23faa..257c1a5 100644
1817 --- a/drivers/i2c/busses/i2c-omap.c
1818 +++ b/drivers/i2c/busses/i2c-omap.c
1819 @@ -235,7 +235,7 @@ static const u8 reg_map_ip_v2[] = {
1820 [OMAP_I2C_BUF_REG] = 0x94,
1821 [OMAP_I2C_CNT_REG] = 0x98,
1822 [OMAP_I2C_DATA_REG] = 0x9c,
1823 - [OMAP_I2C_SYSC_REG] = 0x20,
1824 + [OMAP_I2C_SYSC_REG] = 0x10,
1825 [OMAP_I2C_CON_REG] = 0xa4,
1826 [OMAP_I2C_OA_REG] = 0xa8,
1827 [OMAP_I2C_SA_REG] = 0xac,
1828 diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
1829 index 4375866..6d60284 100644
1830 --- a/drivers/i2c/busses/i2c-sis5595.c
1831 +++ b/drivers/i2c/busses/i2c-sis5595.c
1832 @@ -147,7 +147,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
1833 u16 a;
1834 u8 val;
1835 int *i;
1836 - int retval = -ENODEV;
1837 + int retval;
1838
1839 /* Look for imposters */
1840 for (i = blacklist; *i != 0; i++) {
1841 @@ -223,7 +223,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
1842
1843 error:
1844 release_region(sis5595_base + SMB_INDEX, 2);
1845 - return retval;
1846 + return -ENODEV;
1847 }
1848
1849 static int sis5595_transaction(struct i2c_adapter *adap)
1850 diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
1851 index e6f539e..b617fd0 100644
1852 --- a/drivers/i2c/busses/i2c-sis630.c
1853 +++ b/drivers/i2c/busses/i2c-sis630.c
1854 @@ -393,7 +393,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
1855 {
1856 unsigned char b;
1857 struct pci_dev *dummy = NULL;
1858 - int retval = -ENODEV, i;
1859 + int retval, i;
1860
1861 /* check for supported SiS devices */
1862 for (i=0; supported[i] > 0 ; i++) {
1863 @@ -418,18 +418,21 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
1864 */
1865 if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG,&b)) {
1866 dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n");
1867 + retval = -ENODEV;
1868 goto exit;
1869 }
1870 /* if ACPI already enabled , do nothing */
1871 if (!(b & 0x80) &&
1872 pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) {
1873 dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n");
1874 + retval = -ENODEV;
1875 goto exit;
1876 }
1877
1878 /* Determine the ACPI base address */
1879 if (pci_read_config_word(sis630_dev,SIS630_ACPI_BASE_REG,&acpi_base)) {
1880 dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n");
1881 + retval = -ENODEV;
1882 goto exit;
1883 }
1884
1885 @@ -445,6 +448,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
1886 sis630_driver.name)) {
1887 dev_err(&sis630_dev->dev, "SMBus registers 0x%04x-0x%04x already "
1888 "in use!\n", acpi_base + SMB_STS, acpi_base + SMB_SAA);
1889 + retval = -EBUSY;
1890 goto exit;
1891 }
1892
1893 diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
1894 index 0b012f1..58261d4 100644
1895 --- a/drivers/i2c/busses/i2c-viapro.c
1896 +++ b/drivers/i2c/busses/i2c-viapro.c
1897 @@ -324,7 +324,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev,
1898 const struct pci_device_id *id)
1899 {
1900 unsigned char temp;
1901 - int error = -ENODEV;
1902 + int error;
1903
1904 /* Determine the address of the SMBus areas */
1905 if (force_addr) {
1906 @@ -390,6 +390,7 @@ found:
1907 dev_err(&pdev->dev, "SMBUS: Error: Host SMBus "
1908 "controller not enabled! - upgrade BIOS or "
1909 "use force=1\n");
1910 + error = -ENODEV;
1911 goto release_region;
1912 }
1913 }
1914 @@ -422,9 +423,11 @@ found:
1915 "SMBus Via Pro adapter at %04x", vt596_smba);
1916
1917 vt596_pdev = pci_dev_get(pdev);
1918 - if (i2c_add_adapter(&vt596_adapter)) {
1919 + error = i2c_add_adapter(&vt596_adapter);
1920 + if (error) {
1921 pci_dev_put(vt596_pdev);
1922 vt596_pdev = NULL;
1923 + goto release_region;
1924 }
1925
1926 /* Always return failure here. This is to allow other drivers to bind
1927 diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
1928 index d267b7a..a22ca84 100644
1929 --- a/drivers/ide/ide-floppy_ioctl.c
1930 +++ b/drivers/ide/ide-floppy_ioctl.c
1931 @@ -292,8 +292,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
1932 * and CDROM_SEND_PACKET (legacy) ioctls
1933 */
1934 if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
1935 - err = scsi_cmd_ioctl(bdev->bd_disk->queue, bdev->bd_disk,
1936 - mode, cmd, argp);
1937 + err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
1938
1939 if (err == -ENOTTY)
1940 err = generic_ide_ioctl(drive, bdev, cmd, arg);
1941 diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
1942 index 5d2f8e1..5b39216 100644
1943 --- a/drivers/idle/intel_idle.c
1944 +++ b/drivers/idle/intel_idle.c
1945 @@ -348,7 +348,8 @@ static int intel_idle_probe(void)
1946 cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
1947
1948 if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
1949 - !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
1950 + !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
1951 + !mwait_substates)
1952 return -ENODEV;
1953
1954 pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
1955 @@ -394,7 +395,7 @@ static int intel_idle_probe(void)
1956 if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
1957 lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
1958 else {
1959 - smp_call_function(__setup_broadcast_timer, (void *)true, 1);
1960 + on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
1961 register_cpu_notifier(&setup_broadcast_notifier);
1962 }
1963
1964 @@ -471,7 +472,7 @@ static int intel_idle_cpuidle_driver_init(void)
1965 }
1966
1967 if (auto_demotion_disable_flags)
1968 - smp_call_function(auto_demotion_disable, NULL, 1);
1969 + on_each_cpu(auto_demotion_disable, NULL, 1);
1970
1971 return 0;
1972 }
1973 @@ -568,7 +569,7 @@ static void __exit intel_idle_exit(void)
1974 cpuidle_unregister_driver(&intel_idle_driver);
1975
1976 if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
1977 - smp_call_function(__setup_broadcast_timer, (void *)false, 1);
1978 + on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
1979 unregister_cpu_notifier(&setup_broadcast_notifier);
1980 }
1981
1982 diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
1983 index f84c080..9fb18c1 100644
1984 --- a/drivers/md/dm-flakey.c
1985 +++ b/drivers/md/dm-flakey.c
1986 @@ -368,8 +368,17 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
1987 static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
1988 {
1989 struct flakey_c *fc = ti->private;
1990 + struct dm_dev *dev = fc->dev;
1991 + int r = 0;
1992
1993 - return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg);
1994 + /*
1995 + * Only pass ioctls through if the device sizes match exactly.
1996 + */
1997 + if (fc->start ||
1998 + ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
1999 + r = scsi_verify_blk_ioctl(NULL, cmd);
2000 +
2001 + return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
2002 }
2003
2004 static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2005 diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
2006 index 3921e3b..9728839 100644
2007 --- a/drivers/md/dm-linear.c
2008 +++ b/drivers/md/dm-linear.c
2009 @@ -116,7 +116,17 @@ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
2010 unsigned long arg)
2011 {
2012 struct linear_c *lc = (struct linear_c *) ti->private;
2013 - return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg);
2014 + struct dm_dev *dev = lc->dev;
2015 + int r = 0;
2016 +
2017 + /*
2018 + * Only pass ioctls through if the device sizes match exactly.
2019 + */
2020 + if (lc->start ||
2021 + ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
2022 + r = scsi_verify_blk_ioctl(NULL, cmd);
2023 +
2024 + return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
2025 }
2026
2027 static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2028 diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
2029 index 5e0090e..801d92d 100644
2030 --- a/drivers/md/dm-mpath.c
2031 +++ b/drivers/md/dm-mpath.c
2032 @@ -1520,6 +1520,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
2033
2034 spin_unlock_irqrestore(&m->lock, flags);
2035
2036 + /*
2037 + * Only pass ioctls through if the device sizes match exactly.
2038 + */
2039 + if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
2040 + r = scsi_verify_blk_ioctl(NULL, cmd);
2041 +
2042 return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
2043 }
2044
2045 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2046 index ede2461..7d9e071 100644
2047 --- a/drivers/md/raid1.c
2048 +++ b/drivers/md/raid1.c
2049 @@ -525,8 +525,17 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
2050 if (test_bit(WriteMostly, &rdev->flags)) {
2051 /* Don't balance among write-mostly, just
2052 * use the first as a last resort */
2053 - if (best_disk < 0)
2054 + if (best_disk < 0) {
2055 + if (is_badblock(rdev, this_sector, sectors,
2056 + &first_bad, &bad_sectors)) {
2057 + if (first_bad < this_sector)
2058 + /* Cannot use this */
2059 + continue;
2060 + best_good_sectors = first_bad - this_sector;
2061 + } else
2062 + best_good_sectors = sectors;
2063 best_disk = disk;
2064 + }
2065 continue;
2066 }
2067 /* This is a reasonable device to use. It might
2068 diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
2069 index bcb45be..f0482b2 100644
2070 --- a/drivers/media/video/cx23885/cx23885-dvb.c
2071 +++ b/drivers/media/video/cx23885/cx23885-dvb.c
2072 @@ -940,6 +940,11 @@ static int dvb_register(struct cx23885_tsport *port)
2073
2074 fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
2075 &dev->i2c_bus[1].i2c_adap, &cfg);
2076 + if (!fe) {
2077 + printk(KERN_ERR "%s/2: xc4000 attach failed\n",
2078 + dev->name);
2079 + goto frontend_detach;
2080 + }
2081 }
2082 break;
2083 case CX23885_BOARD_TBS_6920:
2084 diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
2085 index 0d719fa..3929d93 100644
2086 --- a/drivers/media/video/cx88/cx88-cards.c
2087 +++ b/drivers/media/video/cx88/cx88-cards.c
2088 @@ -1573,8 +1573,8 @@ static const struct cx88_board cx88_boards[] = {
2089 .name = "Pinnacle Hybrid PCTV",
2090 .tuner_type = TUNER_XC2028,
2091 .tuner_addr = 0x61,
2092 - .radio_type = TUNER_XC2028,
2093 - .radio_addr = 0x61,
2094 + .radio_type = UNSET,
2095 + .radio_addr = ADDR_UNSET,
2096 .input = { {
2097 .type = CX88_VMUX_TELEVISION,
2098 .vmux = 0,
2099 @@ -1611,8 +1611,8 @@ static const struct cx88_board cx88_boards[] = {
2100 .name = "Leadtek TV2000 XP Global",
2101 .tuner_type = TUNER_XC2028,
2102 .tuner_addr = 0x61,
2103 - .radio_type = TUNER_XC2028,
2104 - .radio_addr = 0x61,
2105 + .radio_type = UNSET,
2106 + .radio_addr = ADDR_UNSET,
2107 .input = { {
2108 .type = CX88_VMUX_TELEVISION,
2109 .vmux = 0,
2110 @@ -2043,8 +2043,8 @@ static const struct cx88_board cx88_boards[] = {
2111 .name = "Terratec Cinergy HT PCI MKII",
2112 .tuner_type = TUNER_XC2028,
2113 .tuner_addr = 0x61,
2114 - .radio_type = TUNER_XC2028,
2115 - .radio_addr = 0x61,
2116 + .radio_type = UNSET,
2117 + .radio_addr = ADDR_UNSET,
2118 .input = { {
2119 .type = CX88_VMUX_TELEVISION,
2120 .vmux = 0,
2121 @@ -2082,9 +2082,9 @@ static const struct cx88_board cx88_boards[] = {
2122 [CX88_BOARD_WINFAST_DTV1800H] = {
2123 .name = "Leadtek WinFast DTV1800 Hybrid",
2124 .tuner_type = TUNER_XC2028,
2125 - .radio_type = TUNER_XC2028,
2126 + .radio_type = UNSET,
2127 .tuner_addr = 0x61,
2128 - .radio_addr = 0x61,
2129 + .radio_addr = ADDR_UNSET,
2130 /*
2131 * GPIO setting
2132 *
2133 @@ -2123,9 +2123,9 @@ static const struct cx88_board cx88_boards[] = {
2134 [CX88_BOARD_WINFAST_DTV1800H_XC4000] = {
2135 .name = "Leadtek WinFast DTV1800 H (XC4000)",
2136 .tuner_type = TUNER_XC4000,
2137 - .radio_type = TUNER_XC4000,
2138 + .radio_type = UNSET,
2139 .tuner_addr = 0x61,
2140 - .radio_addr = 0x61,
2141 + .radio_addr = ADDR_UNSET,
2142 /*
2143 * GPIO setting
2144 *
2145 @@ -2164,9 +2164,9 @@ static const struct cx88_board cx88_boards[] = {
2146 [CX88_BOARD_WINFAST_DTV2000H_PLUS] = {
2147 .name = "Leadtek WinFast DTV2000 H PLUS",
2148 .tuner_type = TUNER_XC4000,
2149 - .radio_type = TUNER_XC4000,
2150 + .radio_type = UNSET,
2151 .tuner_addr = 0x61,
2152 - .radio_addr = 0x61,
2153 + .radio_addr = ADDR_UNSET,
2154 /*
2155 * GPIO
2156 * 2: 1: mute audio
2157 diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
2158 index dadf11f..cf7788f 100644
2159 --- a/drivers/media/video/uvc/uvc_v4l2.c
2160 +++ b/drivers/media/video/uvc/uvc_v4l2.c
2161 @@ -58,6 +58,15 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
2162 break;
2163
2164 case V4L2_CTRL_TYPE_MENU:
2165 + /* Prevent excessive memory consumption, as well as integer
2166 + * overflows.
2167 + */
2168 + if (xmap->menu_count == 0 ||
2169 + xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES) {
2170 + ret = -EINVAL;
2171 + goto done;
2172 + }
2173 +
2174 size = xmap->menu_count * sizeof(*map->menu_info);
2175 map->menu_info = kmalloc(size, GFP_KERNEL);
2176 if (map->menu_info == NULL) {
2177 diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
2178 index 4c1392e..bc446ba 100644
2179 --- a/drivers/media/video/uvc/uvcvideo.h
2180 +++ b/drivers/media/video/uvc/uvcvideo.h
2181 @@ -113,6 +113,7 @@
2182
2183 /* Maximum allowed number of control mappings per device */
2184 #define UVC_MAX_CONTROL_MAPPINGS 1024
2185 +#define UVC_MAX_CONTROL_MENU_ENTRIES 32
2186
2187 /* Devices quirks */
2188 #define UVC_QUIRK_STATUS_INTERVAL 0x00000001
2189 diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
2190 index e1da8fc..639abee 100644
2191 --- a/drivers/media/video/v4l2-ioctl.c
2192 +++ b/drivers/media/video/v4l2-ioctl.c
2193 @@ -2226,6 +2226,10 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
2194 struct v4l2_ext_controls *ctrls = parg;
2195
2196 if (ctrls->count != 0) {
2197 + if (ctrls->count > V4L2_CID_MAX_CTRLS) {
2198 + ret = -EINVAL;
2199 + break;
2200 + }
2201 *user_ptr = (void __user *)ctrls->controls;
2202 *kernel_ptr = (void *)&ctrls->controls;
2203 *array_size = sizeof(struct v4l2_ext_control)
2204 diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
2205 index d240427..fb7c27f 100644
2206 --- a/drivers/mmc/core/mmc.c
2207 +++ b/drivers/mmc/core/mmc.c
2208 @@ -1048,7 +1048,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
2209 *
2210 * WARNING: eMMC rules are NOT the same as SD DDR
2211 */
2212 - if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
2213 + if (ddr == MMC_1_2V_DDR_MODE) {
2214 err = mmc_set_signal_voltage(host,
2215 MMC_SIGNAL_VOLTAGE_120, 0);
2216 if (err)
2217 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
2218 index 19ed580..6ce32a7 100644
2219 --- a/drivers/mmc/host/sdhci.c
2220 +++ b/drivers/mmc/host/sdhci.c
2221 @@ -1364,8 +1364,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
2222 if ((ios->timing == MMC_TIMING_UHS_SDR50) ||
2223 (ios->timing == MMC_TIMING_UHS_SDR104) ||
2224 (ios->timing == MMC_TIMING_UHS_DDR50) ||
2225 - (ios->timing == MMC_TIMING_UHS_SDR25) ||
2226 - (ios->timing == MMC_TIMING_UHS_SDR12))
2227 + (ios->timing == MMC_TIMING_UHS_SDR25))
2228 ctrl |= SDHCI_CTRL_HISPD;
2229
2230 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2231 @@ -2336,9 +2335,8 @@ int sdhci_suspend_host(struct sdhci_host *host)
2232 /* Disable tuning since we are suspending */
2233 if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
2234 host->tuning_mode == SDHCI_TUNING_MODE_1) {
2235 + del_timer_sync(&host->tuning_timer);
2236 host->flags &= ~SDHCI_NEEDS_RETUNING;
2237 - mod_timer(&host->tuning_timer, jiffies +
2238 - host->tuning_count * HZ);
2239 }
2240
2241 ret = mmc_suspend_host(host->mmc);
2242 diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
2243 index ed8b5e7..424ca5f 100644
2244 --- a/drivers/mtd/mtd_blkdevs.c
2245 +++ b/drivers/mtd/mtd_blkdevs.c
2246 @@ -215,7 +215,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
2247
2248 mutex_lock(&dev->lock);
2249
2250 - if (dev->open++)
2251 + if (dev->open)
2252 goto unlock;
2253
2254 kref_get(&dev->ref);
2255 @@ -235,6 +235,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
2256 goto error_release;
2257
2258 unlock:
2259 + dev->open++;
2260 mutex_unlock(&dev->lock);
2261 blktrans_dev_put(dev);
2262 return ret;
2263 diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
2264 index 1e2fa62..f3cdce9 100644
2265 --- a/drivers/mtd/mtdoops.c
2266 +++ b/drivers/mtd/mtdoops.c
2267 @@ -253,6 +253,9 @@ static void find_next_position(struct mtdoops_context *cxt)
2268 size_t retlen;
2269
2270 for (page = 0; page < cxt->oops_pages; page++) {
2271 + if (mtd->block_isbad &&
2272 + mtd->block_isbad(mtd, page * record_size))
2273 + continue;
2274 /* Assume the page is used */
2275 mark_page_used(cxt, page);
2276 ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
2277 @@ -369,7 +372,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
2278
2279 /* oops_page_used is a bit field */
2280 cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
2281 - BITS_PER_LONG));
2282 + BITS_PER_LONG) * sizeof(unsigned long));
2283 if (!cxt->oops_page_used) {
2284 printk(KERN_ERR "mtdoops: could not allocate page array\n");
2285 return;
2286 diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
2287 index 52ffd91..811642f 100644
2288 --- a/drivers/mtd/tests/mtd_stresstest.c
2289 +++ b/drivers/mtd/tests/mtd_stresstest.c
2290 @@ -284,6 +284,12 @@ static int __init mtd_stresstest_init(void)
2291 (unsigned long long)mtd->size, mtd->erasesize,
2292 pgsize, ebcnt, pgcnt, mtd->oobsize);
2293
2294 + if (ebcnt < 2) {
2295 + printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
2296 + err = -ENOSPC;
2297 + goto out_put_mtd;
2298 + }
2299 +
2300 /* Read or write up 2 eraseblocks at a time */
2301 bufsize = mtd->erasesize * 2;
2302
2303 @@ -322,6 +328,7 @@ out:
2304 kfree(bbt);
2305 vfree(writebuf);
2306 vfree(readbuf);
2307 +out_put_mtd:
2308 put_mtd_device(mtd);
2309 if (err)
2310 printk(PRINT_PREF "error %d occurred\n", err);
2311 diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
2312 index 3320a50..ad76592 100644
2313 --- a/drivers/mtd/ubi/cdev.c
2314 +++ b/drivers/mtd/ubi/cdev.c
2315 @@ -632,6 +632,9 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
2316 if (req->alignment != 1 && n)
2317 goto bad;
2318
2319 + if (!req->name[0] || !req->name_len)
2320 + goto bad;
2321 +
2322 if (req->name_len > UBI_VOL_NAME_MAX) {
2323 err = -ENAMETOOLONG;
2324 goto bad;
2325 diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
2326 index 64fbb00..ead2cd1 100644
2327 --- a/drivers/mtd/ubi/debug.h
2328 +++ b/drivers/mtd/ubi/debug.h
2329 @@ -43,7 +43,10 @@
2330 pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
2331
2332 /* Just a debugging messages not related to any specific UBI subsystem */
2333 -#define dbg_msg(fmt, ...) ubi_dbg_msg("msg", fmt, ##__VA_ARGS__)
2334 +#define dbg_msg(fmt, ...) \
2335 + printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
2336 + current->pid, __func__, ##__VA_ARGS__)
2337 +
2338 /* General debugging messages */
2339 #define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
2340 /* Messages from the eraseblock association sub-system */
2341 diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
2342 index fb7f19b..cd26da8 100644
2343 --- a/drivers/mtd/ubi/eba.c
2344 +++ b/drivers/mtd/ubi/eba.c
2345 @@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
2346 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
2347 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
2348 * LEB is already locked, we just do not move it and return
2349 - * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
2350 + * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
2351 + * we do not know the reasons of the contention - it may be just a
2352 + * normal I/O on this LEB, so we want to re-try.
2353 */
2354 err = leb_write_trylock(ubi, vol_id, lnum);
2355 if (err) {
2356 dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
2357 - return MOVE_CANCEL_RACE;
2358 + return MOVE_RETRY;
2359 }
2360
2361 /*
2362 diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
2363 index dc64c76..d51d75d 100644
2364 --- a/drivers/mtd/ubi/ubi.h
2365 +++ b/drivers/mtd/ubi/ubi.h
2366 @@ -120,6 +120,7 @@ enum {
2367 * PEB
2368 * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
2369 * target PEB
2370 + * MOVE_RETRY: retry scrubbing the PEB
2371 */
2372 enum {
2373 MOVE_CANCEL_RACE = 1,
2374 @@ -127,6 +128,7 @@ enum {
2375 MOVE_TARGET_RD_ERR,
2376 MOVE_TARGET_WR_ERR,
2377 MOVE_CANCEL_BITFLIPS,
2378 + MOVE_RETRY,
2379 };
2380
2381 /**
2382 diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
2383 index 9ad18da..890754c 100644
2384 --- a/drivers/mtd/ubi/vtbl.c
2385 +++ b/drivers/mtd/ubi/vtbl.c
2386 @@ -306,7 +306,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
2387 int copy, void *vtbl)
2388 {
2389 int err, tries = 0;
2390 - static struct ubi_vid_hdr *vid_hdr;
2391 + struct ubi_vid_hdr *vid_hdr;
2392 struct ubi_scan_leb *new_seb;
2393
2394 ubi_msg("create volume table (copy #%d)", copy + 1);
2395 diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
2396 index 42c684c..0696e36 100644
2397 --- a/drivers/mtd/ubi/wl.c
2398 +++ b/drivers/mtd/ubi/wl.c
2399 @@ -795,7 +795,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
2400 protect = 1;
2401 goto out_not_moved;
2402 }
2403 -
2404 + if (err == MOVE_RETRY) {
2405 + scrubbing = 1;
2406 + goto out_not_moved;
2407 + }
2408 if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
2409 err == MOVE_TARGET_RD_ERR) {
2410 /*
2411 @@ -1049,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
2412
2413 ubi_err("failed to erase PEB %d, error %d", pnum, err);
2414 kfree(wl_wrk);
2415 - kmem_cache_free(ubi_wl_entry_slab, e);
2416
2417 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
2418 err == -EBUSY) {
2419 @@ -1062,14 +1064,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
2420 goto out_ro;
2421 }
2422 return err;
2423 - } else if (err != -EIO) {
2424 + }
2425 +
2426 + kmem_cache_free(ubi_wl_entry_slab, e);
2427 + if (err != -EIO)
2428 /*
2429 * If this is not %-EIO, we have no idea what to do. Scheduling
2430 * this physical eraseblock for erasure again would cause
2431 * errors again and again. Well, lets switch to R/O mode.
2432 */
2433 goto out_ro;
2434 - }
2435
2436 /* It is %-EIO, the PEB went bad */
2437
2438 diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
2439 index dd2625a..f5e063a 100644
2440 --- a/drivers/net/usb/asix.c
2441 +++ b/drivers/net/usb/asix.c
2442 @@ -974,6 +974,7 @@ static int ax88772_link_reset(struct usbnet *dev)
2443
2444 static int ax88772_reset(struct usbnet *dev)
2445 {
2446 + struct asix_data *data = (struct asix_data *)&dev->data;
2447 int ret, embd_phy;
2448 u16 rx_ctl;
2449
2450 @@ -1051,6 +1052,13 @@ static int ax88772_reset(struct usbnet *dev)
2451 goto out;
2452 }
2453
2454 + /* Rewrite MAC address */
2455 + memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
2456 + ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
2457 + data->mac_addr);
2458 + if (ret < 0)
2459 + goto out;
2460 +
2461 /* Set RX_CTL to default values with 2k buffer, and enable cactus */
2462 ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
2463 if (ret < 0)
2464 @@ -1316,6 +1324,13 @@ static int ax88178_reset(struct usbnet *dev)
2465 if (ret < 0)
2466 return ret;
2467
2468 + /* Rewrite MAC address */
2469 + memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
2470 + ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
2471 + data->mac_addr);
2472 + if (ret < 0)
2473 + return ret;
2474 +
2475 ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
2476 if (ret < 0)
2477 return ret;
2478 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
2479 index ccde784..f5ae3c6 100644
2480 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
2481 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
2482 @@ -526,10 +526,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
2483 rxs->rs_status |= ATH9K_RXERR_DECRYPT;
2484 else if (rxsp->status11 & AR_MichaelErr)
2485 rxs->rs_status |= ATH9K_RXERR_MIC;
2486 - if (rxsp->status11 & AR_KeyMiss)
2487 - rxs->rs_status |= ATH9K_RXERR_KEYMISS;
2488 }
2489
2490 + if (rxsp->status11 & AR_KeyMiss)
2491 + rxs->rs_status |= ATH9K_RXERR_KEYMISS;
2492 +
2493 return 0;
2494 }
2495 EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
2496 diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
2497 index 9953881..8ddef3e 100644
2498 --- a/drivers/net/wireless/ath/ath9k/calib.c
2499 +++ b/drivers/net/wireless/ath/ath9k/calib.c
2500 @@ -402,6 +402,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
2501 ah->noise = ath9k_hw_getchan_noise(ah, chan);
2502 return true;
2503 }
2504 +EXPORT_SYMBOL(ath9k_hw_getnf);
2505
2506 void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
2507 struct ath9k_channel *chan)
2508 diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
2509 index ecdb6fd..bbcb777 100644
2510 --- a/drivers/net/wireless/ath/ath9k/mac.c
2511 +++ b/drivers/net/wireless/ath/ath9k/mac.c
2512 @@ -621,10 +621,11 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
2513 rs->rs_status |= ATH9K_RXERR_DECRYPT;
2514 else if (ads.ds_rxstatus8 & AR_MichaelErr)
2515 rs->rs_status |= ATH9K_RXERR_MIC;
2516 - if (ads.ds_rxstatus8 & AR_KeyMiss)
2517 - rs->rs_status |= ATH9K_RXERR_KEYMISS;
2518 }
2519
2520 + if (ads.ds_rxstatus8 & AR_KeyMiss)
2521 + rs->rs_status |= ATH9K_RXERR_KEYMISS;
2522 +
2523 return 0;
2524 }
2525 EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
2526 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
2527 index a9c5ae7..f76a814 100644
2528 --- a/drivers/net/wireless/ath/ath9k/main.c
2529 +++ b/drivers/net/wireless/ath/ath9k/main.c
2530 @@ -1667,7 +1667,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2531
2532 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2533 struct ieee80211_channel *curchan = hw->conf.channel;
2534 - struct ath9k_channel old_chan;
2535 int pos = curchan->hw_value;
2536 int old_pos = -1;
2537 unsigned long flags;
2538 @@ -1693,11 +1692,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2539 * Preserve the current channel values, before updating
2540 * the same channel
2541 */
2542 - if (old_pos == pos) {
2543 - memcpy(&old_chan, &sc->sc_ah->channels[pos],
2544 - sizeof(struct ath9k_channel));
2545 - ah->curchan = &old_chan;
2546 - }
2547 + if (ah->curchan && (old_pos == pos))
2548 + ath9k_hw_getnf(ah, ah->curchan);
2549
2550 ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
2551 curchan, conf->channel_type);
2552 diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
2553 index b282d86..05f2ad1 100644
2554 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
2555 +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
2556 @@ -2656,14 +2656,13 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2557 IWL_WARN(priv, "Invalid scan band\n");
2558 return -EIO;
2559 }
2560 -
2561 /*
2562 - * If active scaning is requested but a certain channel
2563 - * is marked passive, we can do active scanning if we
2564 - * detect transmissions.
2565 + * If active scaning is requested but a certain channel is marked
2566 + * passive, we can do active scanning if we detect transmissions. For
2567 + * passive only scanning disable switching to active on any channel.
2568 */
2569 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
2570 - IWL_GOOD_CRC_TH_DISABLED;
2571 + IWL_GOOD_CRC_TH_NEVER;
2572
2573 len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
2574 vif->addr, priv->scan_request->ie,
2575 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
2576 index 1a52ed2..6465983 100644
2577 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
2578 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
2579 @@ -827,6 +827,7 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
2580 case IEEE80211_SMPS_STATIC:
2581 case IEEE80211_SMPS_DYNAMIC:
2582 return IWL_NUM_IDLE_CHAINS_SINGLE;
2583 + case IEEE80211_SMPS_AUTOMATIC:
2584 case IEEE80211_SMPS_OFF:
2585 return active_cnt;
2586 default:
2587 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
2588 index 5c7c17c..d552fa3 100644
2589 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
2590 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
2591 @@ -559,6 +559,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
2592
2593 mutex_lock(&priv->shrd->mutex);
2594
2595 + if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
2596 + goto out;
2597 +
2598 if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
2599 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
2600 goto out;
2601 diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
2602 index da48c8a..837b460 100644
2603 --- a/drivers/net/wireless/rt2x00/rt2800pci.c
2604 +++ b/drivers/net/wireless/rt2x00/rt2800pci.c
2605 @@ -422,7 +422,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
2606 static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
2607 enum dev_state state)
2608 {
2609 - int mask = (state == STATE_RADIO_IRQ_ON);
2610 u32 reg;
2611 unsigned long flags;
2612
2613 @@ -436,25 +435,14 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
2614 }
2615
2616 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
2617 - rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
2618 - rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
2619 - rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
2620 - rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
2621 - rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, 0);
2622 - rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, 0);
2623 - rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, 0);
2624 - rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, 0);
2625 - rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, 0);
2626 - rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, 0);
2627 - rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, 0);
2628 - rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, 0);
2629 - rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
2630 - rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
2631 - rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
2632 - rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
2633 - rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0);
2634 - rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
2635 - rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
2636 + reg = 0;
2637 + if (state == STATE_RADIO_IRQ_ON) {
2638 + rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
2639 + rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
2640 + rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
2641 + rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
2642 + rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
2643 + }
2644 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
2645 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
2646
2647 diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
2648 index 6f91a14..3fda6b1 100644
2649 --- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
2650 +++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
2651 @@ -196,6 +196,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
2652 /* Allocate skb buffer to contain firmware */
2653 /* info and tx descriptor info. */
2654 skb = dev_alloc_skb(frag_length);
2655 + if (!skb)
2656 + return false;
2657 skb_reserve(skb, extra_descoffset);
2658 seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length -
2659 extra_descoffset));
2660 @@ -573,6 +575,8 @@ static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd,
2661
2662 len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len);
2663 skb = dev_alloc_skb(len);
2664 + if (!skb)
2665 + return false;
2666 cb_desc = (struct rtl_tcb_desc *)(skb->cb);
2667 cb_desc->queue_index = TXCMD_QUEUE;
2668 cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL;
2669 diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
2670 index 0e6d04d..e3efb43 100644
2671 --- a/drivers/pci/msi.c
2672 +++ b/drivers/pci/msi.c
2673 @@ -870,5 +870,15 @@ EXPORT_SYMBOL(pci_msi_enabled);
2674
2675 void pci_msi_init_pci_dev(struct pci_dev *dev)
2676 {
2677 + int pos;
2678 INIT_LIST_HEAD(&dev->msi_list);
2679 +
2680 + /* Disable the msi hardware to avoid screaming interrupts
2681 + * during boot. This is the power on reset default so
2682 + * usually this should be a noop.
2683 + */
2684 + pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2685 + if (pos)
2686 + msi_set_enable(dev, pos, 0);
2687 + msix_set_enable(dev, 0);
2688 }
2689 diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
2690 index dfbd5a6..258fef2 100644
2691 --- a/drivers/pnp/quirks.c
2692 +++ b/drivers/pnp/quirks.c
2693 @@ -295,6 +295,45 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
2694 }
2695 }
2696
2697 +#ifdef CONFIG_AMD_NB
2698 +
2699 +#include <asm/amd_nb.h>
2700 +
2701 +static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
2702 +{
2703 + resource_size_t start, end;
2704 + struct pnp_resource *pnp_res;
2705 + struct resource *res;
2706 + struct resource mmconfig_res, *mmconfig;
2707 +
2708 + mmconfig = amd_get_mmconfig_range(&mmconfig_res);
2709 + if (!mmconfig)
2710 + return;
2711 +
2712 + list_for_each_entry(pnp_res, &dev->resources, list) {
2713 + res = &pnp_res->res;
2714 + if (res->end < mmconfig->start || res->start > mmconfig->end ||
2715 + (res->start == mmconfig->start && res->end == mmconfig->end))
2716 + continue;
2717 +
2718 + dev_info(&dev->dev, FW_BUG
2719 + "%pR covers only part of AMD MMCONFIG area %pR; adding more reservations\n",
2720 + res, mmconfig);
2721 + if (mmconfig->start < res->start) {
2722 + start = mmconfig->start;
2723 + end = res->start - 1;
2724 + pnp_add_mem_resource(dev, start, end, 0);
2725 + }
2726 + if (mmconfig->end > res->end) {
2727 + start = res->end + 1;
2728 + end = mmconfig->end;
2729 + pnp_add_mem_resource(dev, start, end, 0);
2730 + }
2731 + break;
2732 + }
2733 +}
2734 +#endif
2735 +
2736 /*
2737 * PnP Quirks
2738 * Cards or devices that need some tweaking due to incomplete resource info
2739 @@ -322,6 +361,9 @@ static struct pnp_fixup pnp_fixups[] = {
2740 /* PnP resources that might overlap PCI BARs */
2741 {"PNP0c01", quirk_system_pci_resources},
2742 {"PNP0c02", quirk_system_pci_resources},
2743 +#ifdef CONFIG_AMD_NB
2744 + {"PNP0c01", quirk_amd_mmconfig_area},
2745 +#endif
2746 {""}
2747 };
2748
2749 diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
2750 index 8e28625..8a1c031 100644
2751 --- a/drivers/rtc/interface.c
2752 +++ b/drivers/rtc/interface.c
2753 @@ -228,11 +228,11 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
2754 alarm->time.tm_hour = now.tm_hour;
2755
2756 /* For simplicity, only support date rollover for now */
2757 - if (alarm->time.tm_mday == -1) {
2758 + if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
2759 alarm->time.tm_mday = now.tm_mday;
2760 missing = day;
2761 }
2762 - if (alarm->time.tm_mon == -1) {
2763 + if ((unsigned)alarm->time.tm_mon >= 12) {
2764 alarm->time.tm_mon = now.tm_mon;
2765 if (missing == none)
2766 missing = month;
2767 diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
2768 index beda04a..0794c72 100644
2769 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c
2770 +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
2771 @@ -65,6 +65,8 @@ static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
2772
2773 #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
2774
2775 +#define MAX_HBA_QUEUE_DEPTH 30000
2776 +#define MAX_CHAIN_DEPTH 100000
2777 static int max_queue_depth = -1;
2778 module_param(max_queue_depth, int, 0);
2779 MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
2780 @@ -2311,8 +2313,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
2781 }
2782 if (ioc->chain_dma_pool)
2783 pci_pool_destroy(ioc->chain_dma_pool);
2784 - }
2785 - if (ioc->chain_lookup) {
2786 free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
2787 ioc->chain_lookup = NULL;
2788 }
2789 @@ -2330,9 +2330,7 @@ static int
2790 _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2791 {
2792 struct mpt2sas_facts *facts;
2793 - u32 queue_size, queue_diff;
2794 u16 max_sge_elements;
2795 - u16 num_of_reply_frames;
2796 u16 chains_needed_per_io;
2797 u32 sz, total_sz, reply_post_free_sz;
2798 u32 retry_sz;
2799 @@ -2359,7 +2357,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2800 max_request_credit = (max_queue_depth < facts->RequestCredit)
2801 ? max_queue_depth : facts->RequestCredit;
2802 else
2803 - max_request_credit = facts->RequestCredit;
2804 + max_request_credit = min_t(u16, facts->RequestCredit,
2805 + MAX_HBA_QUEUE_DEPTH);
2806
2807 ioc->hba_queue_depth = max_request_credit;
2808 ioc->hi_priority_depth = facts->HighPriorityCredit;
2809 @@ -2400,50 +2399,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2810 }
2811 ioc->chains_needed_per_io = chains_needed_per_io;
2812
2813 - /* reply free queue sizing - taking into account for events */
2814 - num_of_reply_frames = ioc->hba_queue_depth + 32;
2815 -
2816 - /* number of replies frames can't be a multiple of 16 */
2817 - /* decrease number of reply frames by 1 */
2818 - if (!(num_of_reply_frames % 16))
2819 - num_of_reply_frames--;
2820 -
2821 - /* calculate number of reply free queue entries
2822 - * (must be multiple of 16)
2823 - */
2824 -
2825 - /* (we know reply_free_queue_depth is not a multiple of 16) */
2826 - queue_size = num_of_reply_frames;
2827 - queue_size += 16 - (queue_size % 16);
2828 - ioc->reply_free_queue_depth = queue_size;
2829 -
2830 - /* reply descriptor post queue sizing */
2831 - /* this size should be the number of request frames + number of reply
2832 - * frames
2833 - */
2834 -
2835 - queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
2836 - /* round up to 16 byte boundary */
2837 - if (queue_size % 16)
2838 - queue_size += 16 - (queue_size % 16);
2839 -
2840 - /* check against IOC maximum reply post queue depth */
2841 - if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
2842 - queue_diff = queue_size -
2843 - facts->MaxReplyDescriptorPostQueueDepth;
2844 + /* reply free queue sizing - taking into account for 64 FW events */
2845 + ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2846
2847 - /* round queue_diff up to multiple of 16 */
2848 - if (queue_diff % 16)
2849 - queue_diff += 16 - (queue_diff % 16);
2850 -
2851 - /* adjust hba_queue_depth, reply_free_queue_depth,
2852 - * and queue_size
2853 - */
2854 - ioc->hba_queue_depth -= (queue_diff / 2);
2855 - ioc->reply_free_queue_depth -= (queue_diff / 2);
2856 - queue_size = facts->MaxReplyDescriptorPostQueueDepth;
2857 + /* align the reply post queue on the next 16 count boundary */
2858 + if (!ioc->reply_free_queue_depth % 16)
2859 + ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
2860 + else
2861 + ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
2862 + 32 - (ioc->reply_free_queue_depth % 16);
2863 + if (ioc->reply_post_queue_depth >
2864 + facts->MaxReplyDescriptorPostQueueDepth) {
2865 + ioc->reply_post_queue_depth = min_t(u16,
2866 + (facts->MaxReplyDescriptorPostQueueDepth -
2867 + (facts->MaxReplyDescriptorPostQueueDepth % 16)),
2868 + (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
2869 + ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
2870 + ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
2871 }
2872 - ioc->reply_post_queue_depth = queue_size;
2873 +
2874
2875 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
2876 "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
2877 @@ -2529,15 +2503,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2878 "depth(%d)\n", ioc->name, ioc->request,
2879 ioc->scsiio_depth));
2880
2881 - /* loop till the allocation succeeds */
2882 - do {
2883 - sz = ioc->chain_depth * sizeof(struct chain_tracker);
2884 - ioc->chain_pages = get_order(sz);
2885 - ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2886 - GFP_KERNEL, ioc->chain_pages);
2887 - if (ioc->chain_lookup == NULL)
2888 - ioc->chain_depth -= 100;
2889 - } while (ioc->chain_lookup == NULL);
2890 + ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
2891 + sz = ioc->chain_depth * sizeof(struct chain_tracker);
2892 + ioc->chain_pages = get_order(sz);
2893 +
2894 + ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2895 + GFP_KERNEL, ioc->chain_pages);
2896 ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
2897 ioc->request_sz, 16, 0);
2898 if (!ioc->chain_dma_pool) {
2899 diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2900 index d570573..9bc6fb2 100644
2901 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2902 +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2903 @@ -1007,8 +1007,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
2904 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2905 if (list_empty(&ioc->free_chain_list)) {
2906 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2907 - printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
2908 - ioc->name);
2909 + dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
2910 + "available\n", ioc->name));
2911 return NULL;
2912 }
2913 chain_req = list_entry(ioc->free_chain_list.next,
2914 @@ -6714,6 +6714,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
2915 } else
2916 sas_target_priv_data = NULL;
2917 raid_device->responding = 1;
2918 + spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2919 starget_printk(KERN_INFO, raid_device->starget,
2920 "handle(0x%04x), wwid(0x%016llx)\n", handle,
2921 (unsigned long long)raid_device->wwid);
2922 @@ -6724,16 +6725,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
2923 */
2924 _scsih_init_warpdrive_properties(ioc, raid_device);
2925 if (raid_device->handle == handle)
2926 - goto out;
2927 + return;
2928 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
2929 raid_device->handle);
2930 raid_device->handle = handle;
2931 if (sas_target_priv_data)
2932 sas_target_priv_data->handle = handle;
2933 - goto out;
2934 + return;
2935 }
2936 }
2937 - out:
2938 +
2939 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2940 }
2941
2942 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2943 index fa3a591..4b63c73 100644
2944 --- a/drivers/scsi/sd.c
2945 +++ b/drivers/scsi/sd.c
2946 @@ -1074,6 +1074,10 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
2947 SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
2948 "cmd=0x%x\n", disk->disk_name, cmd));
2949
2950 + error = scsi_verify_blk_ioctl(bdev, cmd);
2951 + if (error < 0)
2952 + return error;
2953 +
2954 /*
2955 * If we are in the middle of error recovery, don't let anyone
2956 * else try and use this device. Also, if error recovery fails, it
2957 @@ -1096,7 +1100,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
2958 error = scsi_ioctl(sdp, cmd, p);
2959 break;
2960 default:
2961 - error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
2962 + error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
2963 if (error != -ENOTTY)
2964 break;
2965 error = scsi_ioctl(sdp, cmd, p);
2966 @@ -1266,6 +1270,11 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
2967 unsigned int cmd, unsigned long arg)
2968 {
2969 struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
2970 + int ret;
2971 +
2972 + ret = scsi_verify_blk_ioctl(bdev, cmd);
2973 + if (ret < 0)
2974 + return -ENOIOCTLCMD;
2975
2976 /*
2977 * If we are in the middle of error recovery, don't let anyone
2978 @@ -1277,8 +1286,6 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
2979 return -ENODEV;
2980
2981 if (sdev->host->hostt->compat_ioctl) {
2982 - int ret;
2983 -
2984 ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
2985
2986 return ret;
2987 diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
2988 index b4543f5..36d1ed7 100644
2989 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
2990 +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
2991 @@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
2992 struct sym_lcb *lp = sym_lp(tp, sdev->lun);
2993 unsigned long flags;
2994
2995 + /* if slave_alloc returned before allocating a sym_lcb, return */
2996 + if (!lp)
2997 + return;
2998 +
2999 spin_lock_irqsave(np->s.host->host_lock, flags);
3000
3001 if (lp->busy_itlq || lp->busy_itl) {
3002 diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
3003 index 831468b..2e8c1be 100644
3004 --- a/drivers/target/target_core_cdb.c
3005 +++ b/drivers/target/target_core_cdb.c
3006 @@ -94,6 +94,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
3007 buf[2] = dev->transport->get_device_rev(dev);
3008
3009 /*
3010 + * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
3011 + *
3012 + * SPC4 says:
3013 + * A RESPONSE DATA FORMAT field set to 2h indicates that the
3014 + * standard INQUIRY data is in the format defined in this
3015 + * standard. Response data format values less than 2h are
3016 + * obsolete. Response data format values greater than 2h are
3017 + * reserved.
3018 + */
3019 + buf[3] = 2;
3020 +
3021 + /*
3022 * Enable SCCS and TPGS fields for Emulated ALUA
3023 */
3024 if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
3025 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
3026 index 0257658..e87d0eb 100644
3027 --- a/drivers/target/target_core_transport.c
3028 +++ b/drivers/target/target_core_transport.c
3029 @@ -4353,6 +4353,7 @@ int transport_send_check_condition_and_sense(
3030 case TCM_NON_EXISTENT_LUN:
3031 /* CURRENT ERROR */
3032 buffer[offset] = 0x70;
3033 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3034 /* ILLEGAL REQUEST */
3035 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
3036 /* LOGICAL UNIT NOT SUPPORTED */
3037 @@ -4362,6 +4363,7 @@ int transport_send_check_condition_and_sense(
3038 case TCM_SECTOR_COUNT_TOO_MANY:
3039 /* CURRENT ERROR */
3040 buffer[offset] = 0x70;
3041 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3042 /* ILLEGAL REQUEST */
3043 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
3044 /* INVALID COMMAND OPERATION CODE */
3045 @@ -4370,6 +4372,7 @@ int transport_send_check_condition_and_sense(
3046 case TCM_UNKNOWN_MODE_PAGE:
3047 /* CURRENT ERROR */
3048 buffer[offset] = 0x70;
3049 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3050 /* ILLEGAL REQUEST */
3051 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
3052 /* INVALID FIELD IN CDB */
3053 @@ -4378,6 +4381,7 @@ int transport_send_check_condition_and_sense(
3054 case TCM_CHECK_CONDITION_ABORT_CMD:
3055 /* CURRENT ERROR */
3056 buffer[offset] = 0x70;
3057 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3058 /* ABORTED COMMAND */
3059 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3060 /* BUS DEVICE RESET FUNCTION OCCURRED */
3061 @@ -4387,6 +4391,7 @@ int transport_send_check_condition_and_sense(
3062 case TCM_INCORRECT_AMOUNT_OF_DATA:
3063 /* CURRENT ERROR */
3064 buffer[offset] = 0x70;
3065 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3066 /* ABORTED COMMAND */
3067 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3068 /* WRITE ERROR */
3069 @@ -4397,6 +4402,7 @@ int transport_send_check_condition_and_sense(
3070 case TCM_INVALID_CDB_FIELD:
3071 /* CURRENT ERROR */
3072 buffer[offset] = 0x70;
3073 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3074 /* ABORTED COMMAND */
3075 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3076 /* INVALID FIELD IN CDB */
3077 @@ -4405,6 +4411,7 @@ int transport_send_check_condition_and_sense(
3078 case TCM_INVALID_PARAMETER_LIST:
3079 /* CURRENT ERROR */
3080 buffer[offset] = 0x70;
3081 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3082 /* ABORTED COMMAND */
3083 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3084 /* INVALID FIELD IN PARAMETER LIST */
3085 @@ -4413,6 +4420,7 @@ int transport_send_check_condition_and_sense(
3086 case TCM_UNEXPECTED_UNSOLICITED_DATA:
3087 /* CURRENT ERROR */
3088 buffer[offset] = 0x70;
3089 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3090 /* ABORTED COMMAND */
3091 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3092 /* WRITE ERROR */
3093 @@ -4423,6 +4431,7 @@ int transport_send_check_condition_and_sense(
3094 case TCM_SERVICE_CRC_ERROR:
3095 /* CURRENT ERROR */
3096 buffer[offset] = 0x70;
3097 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3098 /* ABORTED COMMAND */
3099 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3100 /* PROTOCOL SERVICE CRC ERROR */
3101 @@ -4433,6 +4442,7 @@ int transport_send_check_condition_and_sense(
3102 case TCM_SNACK_REJECTED:
3103 /* CURRENT ERROR */
3104 buffer[offset] = 0x70;
3105 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3106 /* ABORTED COMMAND */
3107 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3108 /* READ ERROR */
3109 @@ -4443,6 +4453,7 @@ int transport_send_check_condition_and_sense(
3110 case TCM_WRITE_PROTECTED:
3111 /* CURRENT ERROR */
3112 buffer[offset] = 0x70;
3113 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3114 /* DATA PROTECT */
3115 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
3116 /* WRITE PROTECTED */
3117 @@ -4451,6 +4462,7 @@ int transport_send_check_condition_and_sense(
3118 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
3119 /* CURRENT ERROR */
3120 buffer[offset] = 0x70;
3121 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3122 /* UNIT ATTENTION */
3123 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
3124 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
3125 @@ -4460,6 +4472,7 @@ int transport_send_check_condition_and_sense(
3126 case TCM_CHECK_CONDITION_NOT_READY:
3127 /* CURRENT ERROR */
3128 buffer[offset] = 0x70;
3129 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3130 /* Not Ready */
3131 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
3132 transport_get_sense_codes(cmd, &asc, &ascq);
3133 @@ -4470,6 +4483,7 @@ int transport_send_check_condition_and_sense(
3134 default:
3135 /* CURRENT ERROR */
3136 buffer[offset] = 0x70;
3137 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3138 /* ILLEGAL REQUEST */
3139 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
3140 /* LOGICAL UNIT COMMUNICATION FAILURE */
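
Every CHECK CONDITION case above now also stores 10 at byte 7 (SPC_ADD_SENSE_LEN_OFFSET, added to target_core_base.h further down in this patch). In fixed-format sense data the byte at offset 7 counts the bytes that follow it, so 8 + 10 = 18 bytes total, enough to cover the ASC/ASCQ fields at offsets 12 and 13 that initiators parse. A small sketch of the resulting layout; the helper itself is illustrative, not the kernel function:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SPC_SENSE_KEY_OFFSET      2
#define SPC_ADD_SENSE_LEN_OFFSET  7
#define SPC_ASC_KEY_OFFSET        12
#define SPC_ASCQ_KEY_OFFSET       13

/* Fill an 18-byte fixed-format sense buffer (current error, 0x70). */
static void fill_sense(uint8_t *buf, uint8_t key, uint8_t asc, uint8_t ascq)
{
	memset(buf, 0, 18);
	buf[0] = 0x70;                        /* current error, fixed format */
	buf[SPC_SENSE_KEY_OFFSET] = key;
	buf[SPC_ADD_SENSE_LEN_OFFSET] = 10;   /* bytes that follow byte 7 */
	buf[SPC_ASC_KEY_OFFSET] = asc;
	buf[SPC_ASCQ_KEY_OFFSET] = ascq;
}

int main(void)
{
	uint8_t sense[18];
	fill_sense(sense, 0x05 /* ILLEGAL REQUEST */, 0x25, 0x00 /* LUN not supported */);
	printf("total len = %d\n", 8 + sense[SPC_ADD_SENSE_LEN_OFFSET]);  /* 18 */
	return 0;
}
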
3141 diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
3142 index ede860f..a580b17 100644
3143 --- a/drivers/xen/xenbus/xenbus_xs.c
3144 +++ b/drivers/xen/xenbus/xenbus_xs.c
3145 @@ -801,6 +801,12 @@ static int process_msg(void)
3146 goto out;
3147 }
3148
3149 + if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
3150 + kfree(msg);
3151 + err = -EINVAL;
3152 + goto out;
3153 + }
3154 +
3155 body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
3156 if (body == NULL) {
3157 kfree(msg);
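
hdr.len arrives over a shared ring from the other end of the xenstore connection, so it is untrusted input; the new cap against XENSTORE_PAYLOAD_MAX rejects it before the kmalloc(len + 1) below, where a 32-bit length of 0xffffffff would have wrapped len + 1 to zero. A sketch of the same check-before-allocate pattern, with made-up names:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAYLOAD_MAX 4096  /* same bound the patch introduces */

/* Copy an untrusted, length-prefixed payload; NULL on a bogus length. */
static char *read_payload(uint32_t untrusted_len, const char *src)
{
	char *body;

	if (untrusted_len > PAYLOAD_MAX)          /* reject before allocating */
		return NULL;
	body = malloc((size_t)untrusted_len + 1); /* +1 can no longer wrap */
	if (!body)
		return NULL;
	memcpy(body, src, untrusted_len);
	body[untrusted_len] = '\0';
	return body;
}

int main(void)
{
	printf("%p\n", (void *)read_payload(UINT32_MAX, ""));  /* (nil): rejected */
	return 0;
}
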
3158 diff --git a/fs/aio.c b/fs/aio.c
3159 index 78c514c..969beb0 100644
3160 --- a/fs/aio.c
3161 +++ b/fs/aio.c
3162 @@ -476,14 +476,21 @@ static void kiocb_batch_init(struct kiocb_batch *batch, long total)
3163 batch->count = total;
3164 }
3165
3166 -static void kiocb_batch_free(struct kiocb_batch *batch)
3167 +static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
3168 {
3169 struct kiocb *req, *n;
3170
3171 + if (list_empty(&batch->head))
3172 + return;
3173 +
3174 + spin_lock_irq(&ctx->ctx_lock);
3175 list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
3176 list_del(&req->ki_batch);
3177 + list_del(&req->ki_list);
3178 kmem_cache_free(kiocb_cachep, req);
3179 + ctx->reqs_active--;
3180 }
3181 + spin_unlock_irq(&ctx->ctx_lock);
3182 }
3183
3184 /*
3185 @@ -1742,7 +1749,7 @@ long do_io_submit(aio_context_t ctx_id, long nr,
3186 }
3187 blk_finish_plug(&plug);
3188
3189 - kiocb_batch_free(&batch);
3190 + kiocb_batch_free(ctx, &batch);
3191 put_ioctx(ctx);
3192 return i ? i : ret;
3193 }
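
kiocb_batch_free() now takes the ioctx so it can hold ctx_lock while it unlinks each leftover request from the context's ki_list and decrements reqs_active; without that bookkeeping an elevated reqs_active count could leave context teardown waiting forever. A reduced sketch of freeing a batch under the owner's lock (all names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req { struct req *next; };

struct ctx {
	pthread_mutex_t lock;
	int reqs_active;    /* teardown waits for this to reach zero */
};

/* Free every request left in the batch and undo the ctx accounting. */
static void batch_free(struct ctx *ctx, struct req **batch)
{
	if (!*batch)
		return;                     /* don't take the lock for nothing */
	pthread_mutex_lock(&ctx->lock);
	while (*batch) {
		struct req *r = *batch;
		*batch = r->next;
		free(r);
		ctx->reqs_active--;         /* the bug was skipping this */
	}
	pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
	struct ctx c = { PTHREAD_MUTEX_INITIALIZER, 2 };
	struct req *batch = calloc(1, sizeof(*batch));
	batch->next = calloc(1, sizeof(*batch->next));
	batch_free(&c, &batch);
	printf("reqs_active = %d\n", c.reqs_active);  /* 0 */
	return 0;
}
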
3194 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3195 index f3670cf..63e4be4 100644
3196 --- a/fs/cifs/connect.c
3197 +++ b/fs/cifs/connect.c
3198 @@ -2914,18 +2914,33 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
3199 #define CIFS_DEFAULT_IOSIZE (1024 * 1024)
3200
3201 /*
3202 - * Windows only supports a max of 60k reads. Default to that when posix
3203 - * extensions aren't in force.
3204 + * Windows only supports a max of 60kb reads and 65535 byte writes. Default to
3205 + * those values when posix extensions aren't in force. In actuality here, we
3206 + * use 65536 to allow for a write that is a multiple of 4k. Most servers seem
3207 + * to be ok with the extra byte even though Windows doesn't send writes that
3208 + * are that large.
3209 + *
3210 + * Citation:
3211 + *
3212 + * http://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx
3213 */
3214 #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
3215 +#define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
3216
3217 static unsigned int
3218 cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
3219 {
3220 __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
3221 struct TCP_Server_Info *server = tcon->ses->server;
3222 - unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize :
3223 - CIFS_DEFAULT_IOSIZE;
3224 + unsigned int wsize;
3225 +
3226 + /* start with specified wsize, or default */
3227 + if (pvolume_info->wsize)
3228 + wsize = pvolume_info->wsize;
3229 + else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
3230 + wsize = CIFS_DEFAULT_IOSIZE;
3231 + else
3232 + wsize = CIFS_DEFAULT_NON_POSIX_WSIZE;
3233
3234 /* can server support 24-bit write sizes? (via UNIX extensions) */
3235 if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
3236 diff --git a/fs/dcache.c b/fs/dcache.c
3237 index 89509b5..f7908ae 100644
3238 --- a/fs/dcache.c
3239 +++ b/fs/dcache.c
3240 @@ -242,6 +242,7 @@ static void dentry_lru_add(struct dentry *dentry)
3241 static void __dentry_lru_del(struct dentry *dentry)
3242 {
3243 list_del_init(&dentry->d_lru);
3244 + dentry->d_flags &= ~DCACHE_SHRINK_LIST;
3245 dentry->d_sb->s_nr_dentry_unused--;
3246 dentry_stat.nr_unused--;
3247 }
3248 @@ -275,15 +276,15 @@ static void dentry_lru_prune(struct dentry *dentry)
3249 }
3250 }
3251
3252 -static void dentry_lru_move_tail(struct dentry *dentry)
3253 +static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
3254 {
3255 spin_lock(&dcache_lru_lock);
3256 if (list_empty(&dentry->d_lru)) {
3257 - list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
3258 + list_add_tail(&dentry->d_lru, list);
3259 dentry->d_sb->s_nr_dentry_unused++;
3260 dentry_stat.nr_unused++;
3261 } else {
3262 - list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
3263 + list_move_tail(&dentry->d_lru, list);
3264 }
3265 spin_unlock(&dcache_lru_lock);
3266 }
3267 @@ -769,14 +770,18 @@ static void shrink_dentry_list(struct list_head *list)
3268 }
3269
3270 /**
3271 - * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
3272 - * @sb: superblock to shrink dentry LRU.
3273 - * @count: number of entries to prune
3274 - * @flags: flags to control the dentry processing
3275 + * prune_dcache_sb - shrink the dcache
3276 + * @sb: superblock
3277 + * @count: number of entries to try to free
3278 + *
3279 + * Attempt to shrink the superblock dcache LRU by @count entries. This is
3280 + * done when we need more memory and is called from the superblock shrinker
3281 + * function.
3282 *
3283 - * If flags contains DCACHE_REFERENCED reference dentries will not be pruned.
3284 + * This function may fail to free any resources if all the dentries are in
3285 + * use.
3286 */
3287 -static void __shrink_dcache_sb(struct super_block *sb, int count, int flags)
3288 +void prune_dcache_sb(struct super_block *sb, int count)
3289 {
3290 struct dentry *dentry;
3291 LIST_HEAD(referenced);
3292 @@ -795,18 +800,13 @@ relock:
3293 goto relock;
3294 }
3295
3296 - /*
3297 - * If we are honouring the DCACHE_REFERENCED flag and the
3298 - * dentry has this flag set, don't free it. Clear the flag
3299 - * and put it back on the LRU.
3300 - */
3301 - if (flags & DCACHE_REFERENCED &&
3302 - dentry->d_flags & DCACHE_REFERENCED) {
3303 + if (dentry->d_flags & DCACHE_REFERENCED) {
3304 dentry->d_flags &= ~DCACHE_REFERENCED;
3305 list_move(&dentry->d_lru, &referenced);
3306 spin_unlock(&dentry->d_lock);
3307 } else {
3308 list_move_tail(&dentry->d_lru, &tmp);
3309 + dentry->d_flags |= DCACHE_SHRINK_LIST;
3310 spin_unlock(&dentry->d_lock);
3311 if (!--count)
3312 break;
3313 @@ -821,23 +821,6 @@ relock:
3314 }
3315
3316 /**
3317 - * prune_dcache_sb - shrink the dcache
3318 - * @sb: superblock
3319 - * @nr_to_scan: number of entries to try to free
3320 - *
3321 - * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
3322 - * done when we need more memory an called from the superblock shrinker
3323 - * function.
3324 - *
3325 - * This function may fail to free any resources if all the dentries are in
3326 - * use.
3327 - */
3328 -void prune_dcache_sb(struct super_block *sb, int nr_to_scan)
3329 -{
3330 - __shrink_dcache_sb(sb, nr_to_scan, DCACHE_REFERENCED);
3331 -}
3332 -
3333 -/**
3334 * shrink_dcache_sb - shrink dcache for a superblock
3335 * @sb: superblock
3336 *
3337 @@ -1091,7 +1074,7 @@ EXPORT_SYMBOL(have_submounts);
3338 * drop the lock and return early due to latency
3339 * constraints.
3340 */
3341 -static int select_parent(struct dentry * parent)
3342 +static int select_parent(struct dentry *parent, struct list_head *dispose)
3343 {
3344 struct dentry *this_parent;
3345 struct list_head *next;
3346 @@ -1113,17 +1096,21 @@ resume:
3347
3348 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3349
3350 - /*
3351 - * move only zero ref count dentries to the end
3352 - * of the unused list for prune_dcache
3353 + /*
3354 + * move only zero ref count dentries to the dispose list.
3355 + *
3356 + * Those which are presently on the shrink list, being processed
3357 + * by shrink_dentry_list(), shouldn't be moved. Otherwise the
3358 + * loop in shrink_dcache_parent() might not make any progress
3359 + * and loop forever.
3360 */
3361 - if (!dentry->d_count) {
3362 - dentry_lru_move_tail(dentry);
3363 - found++;
3364 - } else {
3365 + if (dentry->d_count) {
3366 dentry_lru_del(dentry);
3367 + } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
3368 + dentry_lru_move_list(dentry, dispose);
3369 + dentry->d_flags |= DCACHE_SHRINK_LIST;
3370 + found++;
3371 }
3372 -
3373 /*
3374 * We can return to the caller if we have found some (this
3375 * ensures forward progress). We'll be coming back to find
3376 @@ -1180,14 +1167,13 @@ rename_retry:
3377 *
3378 * Prune the dcache to remove unused children of the parent dentry.
3379 */
3380 -
3381 void shrink_dcache_parent(struct dentry * parent)
3382 {
3383 - struct super_block *sb = parent->d_sb;
3384 + LIST_HEAD(dispose);
3385 int found;
3386
3387 - while ((found = select_parent(parent)) != 0)
3388 - __shrink_dcache_sb(sb, found, 0);
3389 + while ((found = select_parent(parent, &dispose)) != 0)
3390 + shrink_dentry_list(&dispose);
3391 }
3392 EXPORT_SYMBOL(shrink_dcache_parent);
3393
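select_parent() now moves zero-refcount dentries to a caller-supplied dispose list and tags them DCACHE_SHRINK_LIST, skipping dentries already tagged, so shrink_dcache_parent() cannot keep re-selecting entries that another shrinker is mid-way through freeing, as the new comment in the hunk explains. The mark-and-move idiom in isolation:

#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *next;
	int refs;
	bool on_shrink_list;
};

/* Move unreferenced, not-yet-claimed nodes to a private dispose list. */
static int select_nodes(struct node **lru, struct node **dispose)
{
	int found = 0;
	struct node **pp = lru;

	while (*pp) {
		struct node *n = *pp;
		if (n->refs == 0 && !n->on_shrink_list) {
			*pp = n->next;             /* unlink from the LRU */
			n->on_shrink_list = true;  /* claim it */
			n->next = *dispose;
			*dispose = n;
			found++;
		} else {
			pp = &n->next;
		}
	}
	return found;
}

int main(void)
{
	struct node c = { NULL, 0, false }, b = { &c, 1, false }, a = { &b, 0, false };
	struct node *lru = &a, *dispose = NULL;
	printf("claimed %d\n", select_nodes(&lru, &dispose));  /* 2 */
	return 0;
}
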
3394 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
3395 index a567968..ab25f57 100644
3396 --- a/fs/ext4/ioctl.c
3397 +++ b/fs/ext4/ioctl.c
3398 @@ -182,19 +182,22 @@ setversion_out:
3399 if (err)
3400 return err;
3401
3402 - if (get_user(n_blocks_count, (__u32 __user *)arg))
3403 - return -EFAULT;
3404 + if (get_user(n_blocks_count, (__u32 __user *)arg)) {
3405 + err = -EFAULT;
3406 + goto group_extend_out;
3407 + }
3408
3409 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3410 EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
3411 ext4_msg(sb, KERN_ERR,
3412 "Online resizing not supported with bigalloc");
3413 - return -EOPNOTSUPP;
3414 + err = -EOPNOTSUPP;
3415 + goto group_extend_out;
3416 }
3417
3418 err = mnt_want_write(filp->f_path.mnt);
3419 if (err)
3420 - return err;
3421 + goto group_extend_out;
3422
3423 err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
3424 if (EXT4_SB(sb)->s_journal) {
3425 @@ -204,9 +207,10 @@ setversion_out:
3426 }
3427 if (err == 0)
3428 err = err2;
3429 +
3430 mnt_drop_write(filp->f_path.mnt);
3431 +group_extend_out:
3432 ext4_resize_end(sb);
3433 -
3434 return err;
3435 }
3436
3437 @@ -267,19 +271,22 @@ mext_out:
3438 return err;
3439
3440 if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
3441 - sizeof(input)))
3442 - return -EFAULT;
3443 + sizeof(input))) {
3444 + err = -EFAULT;
3445 + goto group_add_out;
3446 + }
3447
3448 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3449 EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
3450 ext4_msg(sb, KERN_ERR,
3451 "Online resizing not supported with bigalloc");
3452 - return -EOPNOTSUPP;
3453 + err = -EOPNOTSUPP;
3454 + goto group_add_out;
3455 }
3456
3457 err = mnt_want_write(filp->f_path.mnt);
3458 if (err)
3459 - return err;
3460 + goto group_add_out;
3461
3462 err = ext4_group_add(sb, &input);
3463 if (EXT4_SB(sb)->s_journal) {
3464 @@ -289,9 +296,10 @@ mext_out:
3465 }
3466 if (err == 0)
3467 err = err2;
3468 +
3469 mnt_drop_write(filp->f_path.mnt);
3470 +group_add_out:
3471 ext4_resize_end(sb);
3472 -
3473 return err;
3474 }
3475
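Both resize ioctls previously returned straight out of the -EFAULT, -EOPNOTSUPP and mnt_want_write() failure paths, skipping the ext4_resize_end() that balances the earlier ext4_resize_begin() and leaving the resize state locked; routing every exit through one label fixes that. The goto-unwind convention, reduced to its skeleton:

#include <stdio.h>

static int resize_flag;

static int resize_begin(void) { if (resize_flag) return -1; resize_flag = 1; return 0; }
static void resize_end(void)  { resize_flag = 0; }

static int do_resize(int simulate_fault)
{
	int err = resize_begin();
	if (err)
		return err;

	if (simulate_fault) {
		err = -14;          /* -EFAULT */
		goto out;           /* must NOT return here directly */
	}
	/* ... actual work ... */
out:
	resize_end();               /* runs on every path after begin() */
	return err;
}

int main(void)
{
	do_resize(1);
	printf("flag after failed resize = %d\n", resize_flag);  /* 0 again */
	return 0;
}
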
3476 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3477 index 3e1329e..9281dbe 100644
3478 --- a/fs/ext4/super.c
3479 +++ b/fs/ext4/super.c
3480 @@ -2006,17 +2006,16 @@ static int ext4_fill_flex_info(struct super_block *sb)
3481 struct ext4_group_desc *gdp = NULL;
3482 ext4_group_t flex_group_count;
3483 ext4_group_t flex_group;
3484 - int groups_per_flex = 0;
3485 + unsigned int groups_per_flex = 0;
3486 size_t size;
3487 int i;
3488
3489 sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
3490 - groups_per_flex = 1 << sbi->s_log_groups_per_flex;
3491 -
3492 - if (groups_per_flex < 2) {
3493 + if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
3494 sbi->s_log_groups_per_flex = 0;
3495 return 1;
3496 }
3497 + groups_per_flex = 1 << sbi->s_log_groups_per_flex;
3498
3499 /* We allocate both existing and potentially added groups */
3500 flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
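
s_log_groups_per_flex is read straight from the on-disk superblock, and the old code computed 1 << n before checking the result; a corrupted exponent of 32 or more is undefined behaviour, and 31 overflows the old signed int. The reordered version validates first and shifts as unsigned:

#include <stdio.h>

/* Return groups-per-flex for a disk-supplied log2 value, 0 if bogus. */
static unsigned int groups_per_flex(unsigned int log2_from_disk)
{
	if (log2_from_disk < 1 || log2_from_disk > 31)
		return 0;                  /* validate BEFORE shifting */
	return 1U << log2_from_disk;       /* unsigned: 1U << 31 is fine */
}

int main(void)
{
	printf("%u %u %u\n", groups_per_flex(4), groups_per_flex(31),
	       groups_per_flex(200));      /* 16 2147483648 0 */
	return 0;
}
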
3501 diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
3502 index 281ae95..3db6b82 100644
3503 --- a/fs/nfs/blocklayout/blocklayout.c
3504 +++ b/fs/nfs/blocklayout/blocklayout.c
3505 @@ -146,14 +146,19 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
3506 {
3507 struct bio *bio;
3508
3509 + npg = min(npg, BIO_MAX_PAGES);
3510 bio = bio_alloc(GFP_NOIO, npg);
3511 - if (!bio)
3512 - return NULL;
3513 + if (!bio && (current->flags & PF_MEMALLOC)) {
3514 + while (!bio && (npg /= 2))
3515 + bio = bio_alloc(GFP_NOIO, npg);
3516 + }
3517
3518 - bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
3519 - bio->bi_bdev = be->be_mdev;
3520 - bio->bi_end_io = end_io;
3521 - bio->bi_private = par;
3522 + if (bio) {
3523 + bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
3524 + bio->bi_bdev = be->be_mdev;
3525 + bio->bi_end_io = end_io;
3526 + bio->bi_private = par;
3527 + }
3528 return bio;
3529 }
3530
3531 @@ -779,16 +784,13 @@ bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
3532 static void free_blk_mountid(struct block_mount_id *mid)
3533 {
3534 if (mid) {
3535 - struct pnfs_block_dev *dev;
3536 - spin_lock(&mid->bm_lock);
3537 - while (!list_empty(&mid->bm_devlist)) {
3538 - dev = list_first_entry(&mid->bm_devlist,
3539 - struct pnfs_block_dev,
3540 - bm_node);
3541 + struct pnfs_block_dev *dev, *tmp;
3542 +
3543 + /* No need to take bm_lock as we are the last user freeing bm_devlist */
3544 + list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
3545 list_del(&dev->bm_node);
3546 bl_free_block_dev(dev);
3547 }
3548 - spin_unlock(&mid->bm_lock);
3549 kfree(mid);
3550 }
3551 }
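
When writeback is driven by memory pressure (PF_MEMALLOC), a large bio_alloc() can fail where a smaller one would succeed, and the old code then dereferenced the NULL bio unconditionally. The new loop degrades the request instead; the shape of that retry, with a stand-in allocator:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in allocator that fails above an arbitrary threshold. */
static void *alloc_io(int npages)
{
	return npages > 16 ? NULL : malloc((size_t)npages * 4096);
}

static void *alloc_io_degrading(int npages)
{
	void *p = alloc_io(npages);
	while (!p && (npages /= 2))     /* halve until success or zero */
		p = alloc_io(npages);
	return p;                       /* may still be NULL: caller checks */
}

int main(void)
{
	void *p = alloc_io_degrading(64);
	printf("%s\n", p ? "got a smaller buffer" : "failed");
	free(p);
	return 0;
}
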
3552 diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
3553 index 19fa7b0..c69682a 100644
3554 --- a/fs/nfs/blocklayout/extents.c
3555 +++ b/fs/nfs/blocklayout/extents.c
3556 @@ -139,11 +139,13 @@ static int _set_range(struct my_tree *tree, int32_t tag, u64 s, u64 length)
3557 }
3558
3559 /* Ensure that future operations on given range of tree will not malloc */
3560 -static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
3561 +static int _preload_range(struct pnfs_inval_markings *marks,
3562 + u64 offset, u64 length)
3563 {
3564 u64 start, end, s;
3565 int count, i, used = 0, status = -ENOMEM;
3566 struct pnfs_inval_tracking **storage;
3567 + struct my_tree *tree = &marks->im_tree;
3568
3569 dprintk("%s(%llu, %llu) enter\n", __func__, offset, length);
3570 start = normalize(offset, tree->mtt_step_size);
3571 @@ -161,12 +163,11 @@ static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
3572 goto out_cleanup;
3573 }
3574
3575 - /* Now need lock - HOW??? */
3576 -
3577 + spin_lock(&marks->im_lock);
3578 for (s = start; s < end; s += tree->mtt_step_size)
3579 used += _add_entry(tree, s, INTERNAL_EXISTS, storage[used]);
3580 + spin_unlock(&marks->im_lock);
3581
3582 - /* Unlock - HOW??? */
3583 status = 0;
3584
3585 out_cleanup:
3586 @@ -286,7 +287,7 @@ int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
3587
3588 start = normalize(offset, marks->im_block_size);
3589 end = normalize_up(offset + length, marks->im_block_size);
3590 - if (_preload_range(&marks->im_tree, start, end - start))
3591 + if (_preload_range(marks, start, end - start))
3592 goto outerr;
3593
3594 spin_lock(&marks->im_lock);
3595 diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
3596 index 43926ad..54cea8a 100644
3597 --- a/fs/nfs/callback_proc.c
3598 +++ b/fs/nfs/callback_proc.c
3599 @@ -339,7 +339,7 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
3600 dprintk("%s enter. slotid %d seqid %d\n",
3601 __func__, args->csa_slotid, args->csa_sequenceid);
3602
3603 - if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS)
3604 + if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
3605 return htonl(NFS4ERR_BADSLOT);
3606
3607 slot = tbl->slots + args->csa_slotid;
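
csa_slotid is used to index tbl->slots, so the valid range is 0 through NFS41_BC_MAX_CALLBACKS - 1; the old '>' comparison let the one-past-the-end index through. The guard in its classic form:

#include <stdio.h>

#define MAX_SLOTS 4

static int slots[MAX_SLOTS];

static int *lookup_slot(unsigned int slotid)
{
	if (slotid >= MAX_SLOTS)  /* '>' would admit slotid == MAX_SLOTS */
		return NULL;
	return &slots[slotid];
}

int main(void)
{
	printf("%p %p\n", (void *)lookup_slot(3), (void *)lookup_slot(4));
	return 0;
}
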
3608 diff --git a/fs/nfs/file.c b/fs/nfs/file.c
3609 index 606ef0f..c43a452 100644
3610 --- a/fs/nfs/file.c
3611 +++ b/fs/nfs/file.c
3612 @@ -272,13 +272,13 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
3613 datasync);
3614
3615 ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
3616 - if (ret)
3617 - return ret;
3618 mutex_lock(&inode->i_mutex);
3619
3620 nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
3621 have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
3622 status = nfs_commit_inode(inode, FLUSH_SYNC);
3623 + if (status >= 0 && ret < 0)
3624 + status = ret;
3625 have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
3626 if (have_error)
3627 ret = xchg(&ctx->error, 0);
3628 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3629 index d9f4d78..055d702 100644
3630 --- a/fs/nfs/nfs4proc.c
3631 +++ b/fs/nfs/nfs4proc.c
3632 @@ -3430,19 +3430,6 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
3633 */
3634 #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
3635
3636 -static void buf_to_pages(const void *buf, size_t buflen,
3637 - struct page **pages, unsigned int *pgbase)
3638 -{
3639 - const void *p = buf;
3640 -
3641 - *pgbase = offset_in_page(buf);
3642 - p -= *pgbase;
3643 - while (p < buf + buflen) {
3644 - *(pages++) = virt_to_page(p);
3645 - p += PAGE_CACHE_SIZE;
3646 - }
3647 -}
3648 -
3649 static int buf_to_pages_noslab(const void *buf, size_t buflen,
3650 struct page **pages, unsigned int *pgbase)
3651 {
3652 @@ -3539,9 +3526,19 @@ out:
3653 nfs4_set_cached_acl(inode, acl);
3654 }
3655
3656 +/*
3657 + * The getxattr API returns the required buffer length when called with a
3658 + * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
3659 + * the required buf. On a NULL buf, we send a page of data to the server
3660 + * guessing that the ACL request can be serviced by a page. If so, we cache
3661 + * up to the page of ACL data, and the 2nd call to getxattr is serviced by
3662 + * the cache. If not so, we throw away the page, and cache the required
3663 + * length. The next getxattr call will then produce another round trip to
3664 + * the server, this time with the input buf of the required size.
3665 + */
3666 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3667 {
3668 - struct page *pages[NFS4ACL_MAXPAGES];
3669 + struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
3670 struct nfs_getaclargs args = {
3671 .fh = NFS_FH(inode),
3672 .acl_pages = pages,
3673 @@ -3556,41 +3553,60 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
3674 .rpc_argp = &args,
3675 .rpc_resp = &res,
3676 };
3677 - struct page *localpage = NULL;
3678 - int ret;
3679 + int ret = -ENOMEM, npages, i, acl_len = 0;
3680
3681 - if (buflen < PAGE_SIZE) {
3682 - /* As long as we're doing a round trip to the server anyway,
3683 - * let's be prepared for a page of acl data. */
3684 - localpage = alloc_page(GFP_KERNEL);
3685 - resp_buf = page_address(localpage);
3686 - if (localpage == NULL)
3687 - return -ENOMEM;
3688 - args.acl_pages[0] = localpage;
3689 - args.acl_pgbase = 0;
3690 - args.acl_len = PAGE_SIZE;
3691 - } else {
3692 - resp_buf = buf;
3693 - buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
3694 + npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3695 + /* As long as we're doing a round trip to the server anyway,
3696 + * let's be prepared for a page of acl data. */
3697 + if (npages == 0)
3698 + npages = 1;
3699 +
3700 + for (i = 0; i < npages; i++) {
3701 + pages[i] = alloc_page(GFP_KERNEL);
3702 + if (!pages[i])
3703 + goto out_free;
3704 + }
3705 + if (npages > 1) {
3706 + /* for decoding across pages */
3707 + args.acl_scratch = alloc_page(GFP_KERNEL);
3708 + if (!args.acl_scratch)
3709 + goto out_free;
3710 }
3711 - ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3712 + args.acl_len = npages * PAGE_SIZE;
3713 + args.acl_pgbase = 0;
3714 + /* Let decode_getacl know not to fail if the ACL data is larger than
3715 + * the page we send as a guess */
3716 + if (buf == NULL)
3717 + res.acl_flags |= NFS4_ACL_LEN_REQUEST;
3718 + resp_buf = page_address(pages[0]);
3719 +
3720 + dprintk("%s buf %p buflen %ld npages %d args.acl_len %ld\n",
3721 + __func__, buf, buflen, npages, args.acl_len);
3722 + ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
3723 + &msg, &args.seq_args, &res.seq_res, 0);
3724 if (ret)
3725 goto out_free;
3726 - if (res.acl_len > args.acl_len)
3727 - nfs4_write_cached_acl(inode, NULL, res.acl_len);
3728 +
3729 + acl_len = res.acl_len - res.acl_data_offset;
3730 + if (acl_len > args.acl_len)
3731 + nfs4_write_cached_acl(inode, NULL, acl_len);
3732 else
3733 - nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
3734 + nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
3735 + acl_len);
3736 if (buf) {
3737 ret = -ERANGE;
3738 - if (res.acl_len > buflen)
3739 + if (acl_len > buflen)
3740 goto out_free;
3741 - if (localpage)
3742 - memcpy(buf, resp_buf, res.acl_len);
3743 + _copy_from_pages(buf, pages, res.acl_data_offset,
3744 + res.acl_len);
3745 }
3746 - ret = res.acl_len;
3747 + ret = acl_len;
3748 out_free:
3749 - if (localpage)
3750 - __free_page(localpage);
3751 + for (i = 0; i < npages; i++)
3752 + if (pages[i])
3753 + __free_page(pages[i]);
3754 + if (args.acl_scratch)
3755 + __free_page(args.acl_scratch);
3756 return ret;
3757 }
3758
3759 @@ -3621,6 +3637,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
3760 nfs_zap_acl_cache(inode);
3761 ret = nfs4_read_cached_acl(inode, buf, buflen);
3762 if (ret != -ENOENT)
3763 + /* -ENOENT is returned if there is no ACL or if there is an ACL
3764 + * but no cached acl data, just the acl length */
3765 return ret;
3766 return nfs4_get_acl_uncached(inode, buf, buflen);
3767 }
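
The comment added above describes a two-call protocol: getxattr() with a NULL buffer probes the length, and the follow-up call fetches the data (from the client's cache, if the server's reply fit in the pages the first request guessed). The user-space half of that handshake looks roughly like this; the path is hypothetical, and the xattr name is the one the NFSv4 ACL tools conventionally use:

#include <stdio.h>
#include <stdlib.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/nfs/file";  /* hypothetical NFSv4 mount */
	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);  /* probe */
	char *buf;

	if (len <= 0)
		return 1;
	buf = malloc(len);
	if (!buf)
		return 1;
	/* Second call; served from the cache when the ACL fit in the guessed
	 * pages, otherwise this costs another round trip to the server. */
	len = getxattr(path, "system.nfs4_acl", buf, (size_t)len);
	printf("acl is %zd bytes\n", len);
	free(buf);
	return 0;
}
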
3768 diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
3769 index e6161b2..dcaf693 100644
3770 --- a/fs/nfs/nfs4xdr.c
3771 +++ b/fs/nfs/nfs4xdr.c
3772 @@ -2517,11 +2517,13 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
3773 encode_compound_hdr(xdr, req, &hdr);
3774 encode_sequence(xdr, &args->seq_args, &hdr);
3775 encode_putfh(xdr, args->fh, &hdr);
3776 - replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
3777 + replen = hdr.replen + op_decode_hdr_maxsz + 1;
3778 encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
3779
3780 xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
3781 args->acl_pages, args->acl_pgbase, args->acl_len);
3782 + xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE);
3783 +
3784 encode_nops(&hdr);
3785 }
3786
3787 @@ -4957,17 +4959,18 @@ decode_restorefh(struct xdr_stream *xdr)
3788 }
3789
3790 static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
3791 - size_t *acl_len)
3792 + struct nfs_getaclres *res)
3793 {
3794 - __be32 *savep;
3795 + __be32 *savep, *bm_p;
3796 uint32_t attrlen,
3797 bitmap[3] = {0};
3798 struct kvec *iov = req->rq_rcv_buf.head;
3799 int status;
3800
3801 - *acl_len = 0;
3802 + res->acl_len = 0;
3803 if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
3804 goto out;
3805 + bm_p = xdr->p;
3806 if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
3807 goto out;
3808 if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
3809 @@ -4979,18 +4982,30 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
3810 size_t hdrlen;
3811 u32 recvd;
3812
3813 + /* The bitmap (xdr len + bitmaps) and the attr xdr len words
3814 + * are stored with the acl data to handle the problem of
3815 + * variable length bitmaps.*/
3816 + xdr->p = bm_p;
3817 + res->acl_data_offset = be32_to_cpup(bm_p) + 2;
3818 + res->acl_data_offset <<= 2;
3819 +
3820 /* We ignore &savep and don't do consistency checks on
3821 * the attr length. Let userspace figure it out.... */
3822 hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
3823 + attrlen += res->acl_data_offset;
3824 recvd = req->rq_rcv_buf.len - hdrlen;
3825 if (attrlen > recvd) {
3826 - dprintk("NFS: server cheating in getattr"
3827 - " acl reply: attrlen %u > recvd %u\n",
3828 + if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
3829 + /* getxattr interface called with a NULL buf */
3830 + res->acl_len = attrlen;
3831 + goto out;
3832 + }
3833 + dprintk("NFS: acl reply: attrlen %u > recvd %u\n",
3834 attrlen, recvd);
3835 return -EINVAL;
3836 }
3837 xdr_read_pages(xdr, attrlen);
3838 - *acl_len = attrlen;
3839 + res->acl_len = attrlen;
3840 } else
3841 status = -EOPNOTSUPP;
3842
3843 @@ -6028,7 +6043,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
3844 status = decode_putfh(xdr);
3845 if (status)
3846 goto out;
3847 - status = decode_getacl(xdr, rqstp, &res->acl_len);
3848 + status = decode_getacl(xdr, rqstp, res);
3849
3850 out:
3851 return status;
3852 diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
3853 index c807ab9..55d0128 100644
3854 --- a/fs/nfs/objlayout/objio_osd.c
3855 +++ b/fs/nfs/objlayout/objio_osd.c
3856 @@ -551,7 +551,8 @@ static const struct nfs_pageio_ops objio_pg_write_ops = {
3857 static struct pnfs_layoutdriver_type objlayout_type = {
3858 .id = LAYOUT_OSD2_OBJECTS,
3859 .name = "LAYOUT_OSD2_OBJECTS",
3860 - .flags = PNFS_LAYOUTRET_ON_SETATTR,
3861 + .flags = PNFS_LAYOUTRET_ON_SETATTR |
3862 + PNFS_LAYOUTRET_ON_ERROR,
3863
3864 .alloc_layout_hdr = objlayout_alloc_layout_hdr,
3865 .free_layout_hdr = objlayout_free_layout_hdr,
3866 diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
3867 index 72074e3..b3c2903 100644
3868 --- a/fs/nfs/objlayout/objlayout.c
3869 +++ b/fs/nfs/objlayout/objlayout.c
3870 @@ -254,6 +254,8 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
3871 oir->status = rdata->task.tk_status = status;
3872 if (status >= 0)
3873 rdata->res.count = status;
3874 + else
3875 + rdata->pnfs_error = status;
3876 objlayout_iodone(oir);
3877 /* must not use oir after this point */
3878
3879 @@ -334,6 +336,8 @@ objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
3880 if (status >= 0) {
3881 wdata->res.count = status;
3882 wdata->verf.committed = oir->committed;
3883 + } else {
3884 + wdata->pnfs_error = status;
3885 }
3886 objlayout_iodone(oir);
3887 /* must not use oir after this point */
3888 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
3889 index 8e672a2..f881a63 100644
3890 --- a/fs/nfs/pnfs.c
3891 +++ b/fs/nfs/pnfs.c
3892 @@ -1178,6 +1178,15 @@ void pnfs_ld_write_done(struct nfs_write_data *data)
3893 put_lseg(data->lseg);
3894 data->lseg = NULL;
3895 dprintk("pnfs write error = %d\n", data->pnfs_error);
3896 + if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
3897 + PNFS_LAYOUTRET_ON_ERROR) {
3898 + /* Don't lo_commit on error, the server will need to
3899 + * perform file recovery.
3900 + */
3901 + clear_bit(NFS_INO_LAYOUTCOMMIT,
3902 + &NFS_I(data->inode)->flags);
3903 + pnfs_return_layout(data->inode);
3904 + }
3905 }
3906 data->mds_ops->rpc_release(data);
3907 }
3908 @@ -1267,6 +1276,9 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
3909 put_lseg(data->lseg);
3910 data->lseg = NULL;
3911 dprintk("pnfs write error = %d\n", data->pnfs_error);
3912 + if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
3913 + PNFS_LAYOUTRET_ON_ERROR)
3914 + pnfs_return_layout(data->inode);
3915
3916 nfs_pageio_init_read_mds(&pgio, data->inode);
3917
3918 diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
3919 index 1509530..53d593a 100644
3920 --- a/fs/nfs/pnfs.h
3921 +++ b/fs/nfs/pnfs.h
3922 @@ -68,6 +68,7 @@ enum {
3923 enum layoutdriver_policy_flags {
3924 /* Should the pNFS client commit and return the layout upon a setattr */
3925 PNFS_LAYOUTRET_ON_SETATTR = 1 << 0,
3926 + PNFS_LAYOUTRET_ON_ERROR = 1 << 1,
3927 };
3928
3929 struct nfs4_deviceid_node;
3930 diff --git a/fs/nfs/super.c b/fs/nfs/super.c
3931 index 1347774..3ada13c 100644
3932 --- a/fs/nfs/super.c
3933 +++ b/fs/nfs/super.c
3934 @@ -909,10 +909,24 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
3935 data->auth_flavor_len = 1;
3936 data->version = version;
3937 data->minorversion = 0;
3938 + security_init_mnt_opts(&data->lsm_opts);
3939 }
3940 return data;
3941 }
3942
3943 +static void nfs_free_parsed_mount_data(struct nfs_parsed_mount_data *data)
3944 +{
3945 + if (data) {
3946 + kfree(data->client_address);
3947 + kfree(data->mount_server.hostname);
3948 + kfree(data->nfs_server.export_path);
3949 + kfree(data->nfs_server.hostname);
3950 + kfree(data->fscache_uniq);
3951 + security_free_mnt_opts(&data->lsm_opts);
3952 + kfree(data);
3953 + }
3954 +}
3955 +
3956 /*
3957 * Sanity-check a server address provided by the mount command.
3958 *
3959 @@ -2220,9 +2234,7 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
3960 data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
3961 mntfh = nfs_alloc_fhandle();
3962 if (data == NULL || mntfh == NULL)
3963 - goto out_free_fh;
3964 -
3965 - security_init_mnt_opts(&data->lsm_opts);
3966 + goto out;
3967
3968 /* Validate the mount data */
3969 error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
3970 @@ -2234,8 +2246,6 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
3971 #ifdef CONFIG_NFS_V4
3972 if (data->version == 4) {
3973 mntroot = nfs4_try_mount(flags, dev_name, data);
3974 - kfree(data->client_address);
3975 - kfree(data->nfs_server.export_path);
3976 goto out;
3977 }
3978 #endif /* CONFIG_NFS_V4 */
3979 @@ -2290,13 +2300,8 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
3980 s->s_flags |= MS_ACTIVE;
3981
3982 out:
3983 - kfree(data->nfs_server.hostname);
3984 - kfree(data->mount_server.hostname);
3985 - kfree(data->fscache_uniq);
3986 - security_free_mnt_opts(&data->lsm_opts);
3987 -out_free_fh:
3988 + nfs_free_parsed_mount_data(data);
3989 nfs_free_fhandle(mntfh);
3990 - kfree(data);
3991 return mntroot;
3992
3993 out_err_nosb:
3994 @@ -2623,9 +2628,7 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
3995
3996 mntfh = nfs_alloc_fhandle();
3997 if (data == NULL || mntfh == NULL)
3998 - goto out_free_fh;
3999 -
4000 - security_init_mnt_opts(&data->lsm_opts);
4001 + goto out;
4002
4003 /* Get a volume representation */
4004 server = nfs4_create_server(data, mntfh);
4005 @@ -2677,13 +2680,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
4006
4007 s->s_flags |= MS_ACTIVE;
4008
4009 - security_free_mnt_opts(&data->lsm_opts);
4010 nfs_free_fhandle(mntfh);
4011 return mntroot;
4012
4013 out:
4014 - security_free_mnt_opts(&data->lsm_opts);
4015 -out_free_fh:
4016 nfs_free_fhandle(mntfh);
4017 return ERR_PTR(error);
4018
4019 @@ -2838,7 +2838,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
4020
4021 data = nfs_alloc_parsed_mount_data(4);
4022 if (data == NULL)
4023 - goto out_free_data;
4024 + goto out;
4025
4026 /* Validate the mount data */
4027 error = nfs4_validate_mount_data(raw_data, data, dev_name);
4028 @@ -2852,12 +2852,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
4029 error = PTR_ERR(res);
4030
4031 out:
4032 - kfree(data->client_address);
4033 - kfree(data->nfs_server.export_path);
4034 - kfree(data->nfs_server.hostname);
4035 - kfree(data->fscache_uniq);
4036 -out_free_data:
4037 - kfree(data);
4038 + nfs_free_parsed_mount_data(data);
4039 dprintk("<-- nfs4_mount() = %d%s\n", error,
4040 error != 0 ? " [error]" : "");
4041 return res;
4042 diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
4043 index 62f3b90..5f312ab 100644
4044 --- a/fs/nfsd/export.c
4045 +++ b/fs/nfsd/export.c
4046 @@ -87,7 +87,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
4047 struct svc_expkey key;
4048 struct svc_expkey *ek = NULL;
4049
4050 - if (mesg[mlen-1] != '\n')
4051 + if (mlen < 1 || mesg[mlen-1] != '\n')
4052 return -EINVAL;
4053 mesg[mlen-1] = 0;
4054
4055 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
4056 index 47e94e3..5abced7 100644
4057 --- a/fs/nfsd/nfs4state.c
4058 +++ b/fs/nfsd/nfs4state.c
4059 @@ -3809,16 +3809,29 @@ nevermind:
4060 deny->ld_type = NFS4_WRITE_LT;
4061 }
4062
4063 +static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, clientid_t *clid, struct xdr_netobj *owner)
4064 +{
4065 + struct nfs4_ol_stateid *lst;
4066 +
4067 + if (!same_owner_str(&lo->lo_owner, owner, clid))
4068 + return false;
4069 + lst = list_first_entry(&lo->lo_owner.so_stateids,
4070 + struct nfs4_ol_stateid, st_perstateowner);
4071 + return lst->st_file->fi_inode == inode;
4072 +}
4073 +
4074 static struct nfs4_lockowner *
4075 find_lockowner_str(struct inode *inode, clientid_t *clid,
4076 struct xdr_netobj *owner)
4077 {
4078 unsigned int hashval = lock_ownerstr_hashval(inode, clid->cl_id, owner);
4079 + struct nfs4_lockowner *lo;
4080 struct nfs4_stateowner *op;
4081
4082 list_for_each_entry(op, &lock_ownerstr_hashtbl[hashval], so_strhash) {
4083 - if (same_owner_str(op, owner, clid))
4084 - return lockowner(op);
4085 + lo = lockowner(op);
4086 + if (same_lockowner_ino(lo, inode, clid, owner))
4087 + return lo;
4088 }
4089 return NULL;
4090 }
4091 diff --git a/fs/notify/mark.c b/fs/notify/mark.c
4092 index e14587d..f104d56 100644
4093 --- a/fs/notify/mark.c
4094 +++ b/fs/notify/mark.c
4095 @@ -135,9 +135,6 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
4096
4097 mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
4098
4099 - /* 1 from caller and 1 for being on i_list/g_list */
4100 - BUG_ON(atomic_read(&mark->refcnt) < 2);
4101 -
4102 spin_lock(&group->mark_lock);
4103
4104 if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
4105 @@ -182,6 +179,11 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
4106 iput(inode);
4107
4108 /*
4109 + * We don't necessarily have a ref on mark from caller so the above iput
4110 + * may have already destroyed it. Don't touch from now on.
4111 + */
4112 +
4113 + /*
4114 * it's possible that this group tried to destroy itself, but
4115 * this mark was simultaneously being freed by inode. If that's the
4116 * case, we finish freeing the group here.
4117 diff --git a/fs/proc/base.c b/fs/proc/base.c
4118 index 851ba3d..1fc1dca 100644
4119 --- a/fs/proc/base.c
4120 +++ b/fs/proc/base.c
4121 @@ -194,65 +194,7 @@ static int proc_root_link(struct inode *inode, struct path *path)
4122 return result;
4123 }
4124
4125 -static struct mm_struct *__check_mem_permission(struct task_struct *task)
4126 -{
4127 - struct mm_struct *mm;
4128 -
4129 - mm = get_task_mm(task);
4130 - if (!mm)
4131 - return ERR_PTR(-EINVAL);
4132 -
4133 - /*
4134 - * A task can always look at itself, in case it chooses
4135 - * to use system calls instead of load instructions.
4136 - */
4137 - if (task == current)
4138 - return mm;
4139 -
4140 - /*
4141 - * If current is actively ptrace'ing, and would also be
4142 - * permitted to freshly attach with ptrace now, permit it.
4143 - */
4144 - if (task_is_stopped_or_traced(task)) {
4145 - int match;
4146 - rcu_read_lock();
4147 - match = (ptrace_parent(task) == current);
4148 - rcu_read_unlock();
4149 - if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
4150 - return mm;
4151 - }
4152 -
4153 - /*
4154 - * No one else is allowed.
4155 - */
4156 - mmput(mm);
4157 - return ERR_PTR(-EPERM);
4158 -}
4159 -
4160 -/*
4161 - * If current may access user memory in @task return a reference to the
4162 - * corresponding mm, otherwise ERR_PTR.
4163 - */
4164 -static struct mm_struct *check_mem_permission(struct task_struct *task)
4165 -{
4166 - struct mm_struct *mm;
4167 - int err;
4168 -
4169 - /*
4170 - * Avoid racing if task exec's as we might get a new mm but validate
4171 - * against old credentials.
4172 - */
4173 - err = mutex_lock_killable(&task->signal->cred_guard_mutex);
4174 - if (err)
4175 - return ERR_PTR(err);
4176 -
4177 - mm = __check_mem_permission(task);
4178 - mutex_unlock(&task->signal->cred_guard_mutex);
4179 -
4180 - return mm;
4181 -}
4182 -
4183 -struct mm_struct *mm_for_maps(struct task_struct *task)
4184 +static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
4185 {
4186 struct mm_struct *mm;
4187 int err;
4188 @@ -263,7 +205,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
4189
4190 mm = get_task_mm(task);
4191 if (mm && mm != current->mm &&
4192 - !ptrace_may_access(task, PTRACE_MODE_READ)) {
4193 + !ptrace_may_access(task, mode)) {
4194 mmput(mm);
4195 mm = ERR_PTR(-EACCES);
4196 }
4197 @@ -272,6 +214,11 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
4198 return mm;
4199 }
4200
4201 +struct mm_struct *mm_for_maps(struct task_struct *task)
4202 +{
4203 + return mm_access(task, PTRACE_MODE_READ);
4204 +}
4205 +
4206 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
4207 {
4208 int res = 0;
4209 @@ -816,38 +763,39 @@ static const struct file_operations proc_single_file_operations = {
4210
4211 static int mem_open(struct inode* inode, struct file* file)
4212 {
4213 - file->private_data = (void*)((long)current->self_exec_id);
4214 + struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
4215 + struct mm_struct *mm;
4216 +
4217 + if (!task)
4218 + return -ESRCH;
4219 +
4220 + mm = mm_access(task, PTRACE_MODE_ATTACH);
4221 + put_task_struct(task);
4222 +
4223 + if (IS_ERR(mm))
4224 + return PTR_ERR(mm);
4225 +
4226 /* OK to pass negative loff_t, we can catch out-of-range */
4227 file->f_mode |= FMODE_UNSIGNED_OFFSET;
4228 + file->private_data = mm;
4229 +
4230 return 0;
4231 }
4232
4233 static ssize_t mem_read(struct file * file, char __user * buf,
4234 size_t count, loff_t *ppos)
4235 {
4236 - struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
4237 + int ret;
4238 char *page;
4239 unsigned long src = *ppos;
4240 - int ret = -ESRCH;
4241 - struct mm_struct *mm;
4242 + struct mm_struct *mm = file->private_data;
4243
4244 - if (!task)
4245 - goto out_no_task;
4246 + if (!mm)
4247 + return 0;
4248
4249 - ret = -ENOMEM;
4250 page = (char *)__get_free_page(GFP_TEMPORARY);
4251 if (!page)
4252 - goto out;
4253 -
4254 - mm = check_mem_permission(task);
4255 - ret = PTR_ERR(mm);
4256 - if (IS_ERR(mm))
4257 - goto out_free;
4258 -
4259 - ret = -EIO;
4260 -
4261 - if (file->private_data != (void*)((long)current->self_exec_id))
4262 - goto out_put;
4263 + return -ENOMEM;
4264
4265 ret = 0;
4266
4267 @@ -874,13 +822,7 @@ static ssize_t mem_read(struct file * file, char __user * buf,
4268 }
4269 *ppos = src;
4270
4271 -out_put:
4272 - mmput(mm);
4273 -out_free:
4274 free_page((unsigned long) page);
4275 -out:
4276 - put_task_struct(task);
4277 -out_no_task:
4278 return ret;
4279 }
4280
4281 @@ -889,27 +831,15 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
4282 {
4283 int copied;
4284 char *page;
4285 - struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
4286 unsigned long dst = *ppos;
4287 - struct mm_struct *mm;
4288 + struct mm_struct *mm = file->private_data;
4289
4290 - copied = -ESRCH;
4291 - if (!task)
4292 - goto out_no_task;
4293 + if (!mm)
4294 + return 0;
4295
4296 - copied = -ENOMEM;
4297 page = (char *)__get_free_page(GFP_TEMPORARY);
4298 if (!page)
4299 - goto out_task;
4300 -
4301 - mm = check_mem_permission(task);
4302 - copied = PTR_ERR(mm);
4303 - if (IS_ERR(mm))
4304 - goto out_free;
4305 -
4306 - copied = -EIO;
4307 - if (file->private_data != (void *)((long)current->self_exec_id))
4308 - goto out_mm;
4309 + return -ENOMEM;
4310
4311 copied = 0;
4312 while (count > 0) {
4313 @@ -933,13 +863,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
4314 }
4315 *ppos = dst;
4316
4317 -out_mm:
4318 - mmput(mm);
4319 -out_free:
4320 free_page((unsigned long) page);
4321 -out_task:
4322 - put_task_struct(task);
4323 -out_no_task:
4324 return copied;
4325 }
4326
4327 @@ -959,11 +883,20 @@ loff_t mem_lseek(struct file *file, loff_t offset, int orig)
4328 return file->f_pos;
4329 }
4330
4331 +static int mem_release(struct inode *inode, struct file *file)
4332 +{
4333 + struct mm_struct *mm = file->private_data;
4334 +
4335 + mmput(mm);
4336 + return 0;
4337 +}
4338 +
4339 static const struct file_operations proc_mem_operations = {
4340 .llseek = mem_lseek,
4341 .read = mem_read,
4342 .write = mem_write,
4343 .open = mem_open,
4344 + .release = mem_release,
4345 };
4346
4347 static ssize_t environ_read(struct file *file, char __user *buf,
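
The /proc/<pid>/mem rework above does the PTRACE_MODE_ATTACH check once, in mem_open(), and pins the resulting mm in file->private_data; reads and writes then operate on that mm, and mem_release() drops it. The old scheme re-checked at access time against self_exec_id, which left an exploitable window around exec. A toy sketch of the capture-at-open pattern (the types are stand-ins):

#include <stdlib.h>

struct mm { int users; };
struct file { struct mm *private_data; };  /* hypothetical handles */

/* Permission is checked exactly once, here; a later exec of the target
 * cannot change what this descriptor reaches. */
static struct mm *mm_access_checked(void)
{
	struct mm *mm = malloc(sizeof(*mm));   /* stands in for the real check */
	if (mm)
		mm->users = 1;
	return mm;
}

static int mem_open(struct file *f)
{
	struct mm *mm = mm_access_checked();
	if (!mm)
		return -1;
	f->private_data = mm;   /* every later read/write uses THIS mm */
	return 0;
}

static void mem_release(struct file *f)
{
	free(f->private_data);  /* the kernel drops its pin with mmput() */
}

int main(void)
{
	struct file f;
	if (mem_open(&f) == 0) {
		/* ... reads and writes against f.private_data ... */
		mem_release(&f);
	}
	return 0;
}
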
4348 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
4349 index e418c5a..7dcd2a2 100644
4350 --- a/fs/proc/task_mmu.c
4351 +++ b/fs/proc/task_mmu.c
4352 @@ -518,6 +518,9 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
4353 if (!page)
4354 continue;
4355
4356 + if (PageReserved(page))
4357 + continue;
4358 +
4359 /* Clear accessed and referenced bits. */
4360 ptep_test_and_clear_young(vma, addr, pte);
4361 ClearPageReferenced(page);
4362 diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
4363 index 766b1d4..29166ec 100644
4364 --- a/fs/proc/uptime.c
4365 +++ b/fs/proc/uptime.c
4366 @@ -11,15 +11,20 @@ static int uptime_proc_show(struct seq_file *m, void *v)
4367 {
4368 struct timespec uptime;
4369 struct timespec idle;
4370 + cputime64_t idletime;
4371 + u64 nsec;
4372 + u32 rem;
4373 int i;
4374 - cputime_t idletime = cputime_zero;
4375
4376 + idletime = 0;
4377 for_each_possible_cpu(i)
4378 idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
4379
4380 do_posix_clock_monotonic_gettime(&uptime);
4381 monotonic_to_bootbased(&uptime);
4382 - cputime_to_timespec(idletime, &idle);
4383 + nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
4384 + idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
4385 + idle.tv_nsec = rem;
4386 seq_printf(m, "%lu.%02lu %lu.%02lu\n",
4387 (unsigned long) uptime.tv_sec,
4388 (uptime.tv_nsec / (NSEC_PER_SEC / 100)),
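
Summing per-CPU idle time could overflow a 32-bit cputime_t after enough uptime, so the conversion now stays in 64 bits throughout and splits nanoseconds into seconds and remainder with div_u64_rem(). The same split in plain C, assuming HZ=100 so one tick is 10 ms:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t jiffies64 = 3000000000ULL;       /* > 2^31: wraps 32-bit math */
	uint64_t nsec = jiffies64 * 10000000ULL;  /* HZ=100 -> TICK_NSEC = 10 ms */
	uint64_t sec = nsec / NSEC_PER_SEC;       /* div_u64_rem() in the kernel */
	uint32_t rem = (uint32_t)(nsec % NSEC_PER_SEC);

	printf("%" PRIu64 ".%09u\n", sec, rem);
	return 0;
}
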
4389 diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
4390 index 8d9c468..c9d2941 100644
4391 --- a/fs/ubifs/debug.h
4392 +++ b/fs/ubifs/debug.h
4393 @@ -175,22 +175,23 @@ const char *dbg_key_str1(const struct ubifs_info *c,
4394 const union ubifs_key *key);
4395
4396 /*
4397 - * DBGKEY macros require @dbg_lock to be held, which it is in the dbg message
4398 - * macros.
4399 + * TODO: these macros are now broken because there is no locking around them
4400 + * and we use a global buffer for the key string. This means that in case of
4401 + * concurrent execution we will end up with incorrect and messy key strings.
4402 */
4403 #define DBGKEY(key) dbg_key_str0(c, (key))
4404 #define DBGKEY1(key) dbg_key_str1(c, (key))
4405
4406 extern spinlock_t dbg_lock;
4407
4408 -#define ubifs_dbg_msg(type, fmt, ...) do { \
4409 - spin_lock(&dbg_lock); \
4410 - pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__); \
4411 - spin_unlock(&dbg_lock); \
4412 -} while (0)
4413 +#define ubifs_dbg_msg(type, fmt, ...) \
4414 + pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__)
4415
4416 /* Just a debugging messages not related to any specific UBIFS subsystem */
4417 -#define dbg_msg(fmt, ...) ubifs_dbg_msg("msg", fmt, ##__VA_ARGS__)
4418 +#define dbg_msg(fmt, ...) \
4419 + printk(KERN_DEBUG "UBIFS DBG (pid %d): %s: " fmt "\n", current->pid, \
4420 + __func__, ##__VA_ARGS__)
4421 +
4422 /* General messages */
4423 #define dbg_gen(fmt, ...) ubifs_dbg_msg("gen", fmt, ##__VA_ARGS__)
4424 /* Additional journal messages */
4425 diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
4426 index 8a24f0c..286a051 100644
4427 --- a/fs/xfs/xfs_discard.c
4428 +++ b/fs/xfs/xfs_discard.c
4429 @@ -68,7 +68,7 @@ xfs_trim_extents(
4430 * Look up the longest btree in the AGF and start with it.
4431 */
4432 error = xfs_alloc_lookup_le(cur, 0,
4433 - XFS_BUF_TO_AGF(agbp)->agf_longest, &i);
4434 + be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest), &i);
4435 if (error)
4436 goto out_del_cursor;
4437
4438 @@ -84,7 +84,7 @@ xfs_trim_extents(
4439 if (error)
4440 goto out_del_cursor;
4441 XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor);
4442 - ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest);
4443 + ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest));
4444
4445 /*
4446 * Too small? Give up.
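
agf_longest lives on disk in big-endian byte order (__be32), so using it raw only happened to work on big-endian hosts; be32_to_cpu() is the required conversion. A portable equivalent that works regardless of host endianness:

#include <stdint.h>
#include <stdio.h>

/* Portable be32_to_cpu(): assemble from bytes, independent of host order. */
static uint32_t be32_to_cpu_portable(const uint8_t b[4])
{
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

int main(void)
{
	uint8_t on_disk[4] = { 0x00, 0x00, 0x01, 0x00 };  /* 256, big-endian */
	printf("%u\n", be32_to_cpu_portable(on_disk));    /* 256 everywhere */
	return 0;
}
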
4447 diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
4448 index 1739726..451823c 100644
4449 --- a/include/acpi/acpi_numa.h
4450 +++ b/include/acpi/acpi_numa.h
4451 @@ -15,6 +15,7 @@ extern int pxm_to_node(int);
4452 extern int node_to_pxm(int);
4453 extern void __acpi_map_pxm_to_node(int, int);
4454 extern int acpi_map_pxm_to_node(int);
4455 +extern unsigned char acpi_srat_revision;
4456
4457 #endif /* CONFIG_ACPI_NUMA */
4458 #endif /* __ACP_NUMA_H */
4459 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
4460 index 94acd81..0ed1eb0 100644
4461 --- a/include/linux/blkdev.h
4462 +++ b/include/linux/blkdev.h
4463 @@ -675,6 +675,9 @@ extern int blk_insert_cloned_request(struct request_queue *q,
4464 struct request *rq);
4465 extern void blk_delay_queue(struct request_queue *, unsigned long);
4466 extern void blk_recount_segments(struct request_queue *, struct bio *);
4467 +extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
4468 +extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
4469 + unsigned int, void __user *);
4470 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
4471 unsigned int, void __user *);
4472 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
4473 diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
4474 index 5c4abce..b936763 100644
4475 --- a/include/linux/crash_dump.h
4476 +++ b/include/linux/crash_dump.h
4477 @@ -5,6 +5,7 @@
4478 #include <linux/kexec.h>
4479 #include <linux/device.h>
4480 #include <linux/proc_fs.h>
4481 +#include <linux/elf.h>
4482
4483 #define ELFCORE_ADDR_MAX (-1ULL)
4484 #define ELFCORE_ADDR_ERR (-2ULL)
4485 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
4486 index ed9f74f..4eb8c80 100644
4487 --- a/include/linux/dcache.h
4488 +++ b/include/linux/dcache.h
4489 @@ -203,6 +203,7 @@ struct dentry_operations {
4490
4491 #define DCACHE_CANT_MOUNT 0x0100
4492 #define DCACHE_GENOCIDE 0x0200
4493 +#define DCACHE_SHRINK_LIST 0x0400
4494
4495 #define DCACHE_NFSFS_RENAMED 0x1000
4496 /* this dentry has been "silly renamed" and has to be deleted on the last
4497 diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
4498 index b87068a..81572af 100644
4499 --- a/include/linux/memcontrol.h
4500 +++ b/include/linux/memcontrol.h
4501 @@ -119,6 +119,8 @@ struct zone_reclaim_stat*
4502 mem_cgroup_get_reclaim_stat_from_page(struct page *page);
4503 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
4504 struct task_struct *p);
4505 +extern void mem_cgroup_replace_page_cache(struct page *oldpage,
4506 + struct page *newpage);
4507
4508 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4509 extern int do_swap_account;
4510 @@ -366,6 +368,10 @@ static inline
4511 void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
4512 {
4513 }
4514 +static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
4515 + struct page *newpage)
4516 +{
4517 +}
4518 #endif /* CONFIG_CGROUP_MEM_CONT */
4519
4520 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
4521 diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
4522 index 2a7c533..6c898af 100644
4523 --- a/include/linux/nfs_xdr.h
4524 +++ b/include/linux/nfs_xdr.h
4525 @@ -602,11 +602,16 @@ struct nfs_getaclargs {
4526 size_t acl_len;
4527 unsigned int acl_pgbase;
4528 struct page ** acl_pages;
4529 + struct page * acl_scratch;
4530 struct nfs4_sequence_args seq_args;
4531 };
4532
4533 +/* getxattr ACL interface flags */
4534 +#define NFS4_ACL_LEN_REQUEST 0x0001 /* zero length getxattr buffer */
4535 struct nfs_getaclres {
4536 size_t acl_len;
4537 + size_t acl_data_offset;
4538 + int acl_flags;
4539 struct nfs4_sequence_res seq_res;
4540 };
4541
4542 diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
4543 index b5d9657..411c412 100644
4544 --- a/include/linux/pci_regs.h
4545 +++ b/include/linux/pci_regs.h
4546 @@ -392,7 +392,7 @@
4547 #define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
4548 #define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
4549 #define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
4550 -#define PCI_EXP_TYPE_RC_EC 0x10 /* Root Complex Event Collector */
4551 +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
4552 #define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
4553 #define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
4554 #define PCI_EXP_DEVCAP 4 /* Device capabilities */
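
The Root Complex Event Collector port type is decimal 10 in the PCIe spec; writing it as 0x10 silently defined it as 16 and misclassified such devices. The trap in two lines:

#include <stdio.h>

int main(void)
{
	printf("0x10 == %d, 0xa == %d\n", 0x10, 0xa);  /* 16 vs 10 */
	return 0;
}
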
4555 diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
4556 index 9291ac3..6f10c9c 100644
4557 --- a/include/linux/shmem_fs.h
4558 +++ b/include/linux/shmem_fs.h
4559 @@ -48,6 +48,7 @@ extern struct file *shmem_file_setup(const char *name,
4560 loff_t size, unsigned long flags);
4561 extern int shmem_zero_setup(struct vm_area_struct *);
4562 extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
4563 +extern void shmem_unlock_mapping(struct address_space *mapping);
4564 extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4565 pgoff_t index, gfp_t gfp_mask);
4566 extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
4567 diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
4568 index 85c50b4..c84e974 100644
4569 --- a/include/linux/sunrpc/svcsock.h
4570 +++ b/include/linux/sunrpc/svcsock.h
4571 @@ -34,7 +34,7 @@ struct svc_sock {
4572 /*
4573 * Function prototypes.
4574 */
4575 -void svc_close_all(struct list_head *);
4576 +void svc_close_all(struct svc_serv *);
4577 int svc_recv(struct svc_rqst *, long);
4578 int svc_send(struct svc_rqst *);
4579 void svc_drop(struct svc_rqst *);
4580 diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
4581 index a20970e..af70af3 100644
4582 --- a/include/linux/sunrpc/xdr.h
4583 +++ b/include/linux/sunrpc/xdr.h
4584 @@ -191,6 +191,8 @@ extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
4585 struct xdr_array2_desc *desc);
4586 extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
4587 struct xdr_array2_desc *desc);
4588 +extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
4589 + size_t len);
4590
4591 /*
4592 * Provide some simple tools for XDR buffer overflow-checking etc.
4593 diff --git a/include/linux/swap.h b/include/linux/swap.h
4594 index 1e22e12..67b3fa3 100644
4595 --- a/include/linux/swap.h
4596 +++ b/include/linux/swap.h
4597 @@ -272,7 +272,7 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
4598 #endif
4599
4600 extern int page_evictable(struct page *page, struct vm_area_struct *vma);
4601 -extern void scan_mapping_unevictable_pages(struct address_space *);
4602 +extern void check_move_unevictable_pages(struct page **, int nr_pages);
4603
4604 extern unsigned long scan_unevictable_pages;
4605 extern int scan_unevictable_handler(struct ctl_table *, int,
4606 diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
4607 index 4b752d5..45a7698 100644
4608 --- a/include/linux/videodev2.h
4609 +++ b/include/linux/videodev2.h
4610 @@ -1131,6 +1131,7 @@ struct v4l2_querymenu {
4611 #define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
4612
4613 /* User-class control IDs defined by V4L2 */
4614 +#define V4L2_CID_MAX_CTRLS 1024
4615 #define V4L2_CID_BASE (V4L2_CTRL_CLASS_USER | 0x900)
4616 #define V4L2_CID_USER_BASE V4L2_CID_BASE
4617 /* IDs reserved for driver specific controls */
4618 diff --git a/include/media/tuner.h b/include/media/tuner.h
4619 index 89c290b..29e1920 100644
4620 --- a/include/media/tuner.h
4621 +++ b/include/media/tuner.h
4622 @@ -127,7 +127,6 @@
4623 #define TUNER_PHILIPS_FMD1216MEX_MK3 78
4624 #define TUNER_PHILIPS_FM1216MK5 79
4625 #define TUNER_PHILIPS_FQ1216LME_MK3 80 /* Active loopthrough, no FM */
4626 -#define TUNER_XC4000 81 /* Xceive Silicon Tuner */
4627
4628 #define TUNER_PARTSNIC_PTI_5NF05 81
4629 #define TUNER_PHILIPS_CU1216L 82
4630 @@ -136,6 +135,8 @@
4631 #define TUNER_PHILIPS_FQ1236_MK5 85 /* NTSC, TDA9885, no FM radio */
4632 #define TUNER_TENA_TNF_5337 86
4633
4634 +#define TUNER_XC4000 87 /* Xceive Silicon Tuner */
4635 +
4636 /* tv card specific */
4637 #define TDA9887_PRESENT (1<<0)
4638 #define TDA9887_PORT1_INACTIVE (1<<1)
4639 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
4640 index 6873c7d..a79886c 100644
4641 --- a/include/target/target_core_base.h
4642 +++ b/include/target/target_core_base.h
4643 @@ -34,6 +34,7 @@
4644 #define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE
4645 /* Used by transport_send_check_condition_and_sense() */
4646 #define SPC_SENSE_KEY_OFFSET 2
4647 +#define SPC_ADD_SENSE_LEN_OFFSET 7
4648 #define SPC_ASC_KEY_OFFSET 12
4649 #define SPC_ASCQ_KEY_OFFSET 13
4650 #define TRANSPORT_IQN_LEN 224
4651 diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
4652 index f6f07aa..7cdfca2 100644
4653 --- a/include/xen/interface/io/xs_wire.h
4654 +++ b/include/xen/interface/io/xs_wire.h
4655 @@ -87,4 +87,7 @@ struct xenstore_domain_interface {
4656 XENSTORE_RING_IDX rsp_cons, rsp_prod;
4657 };
4658
4659 +/* Violating this is very bad. See docs/misc/xenstore.txt. */
4660 +#define XENSTORE_PAYLOAD_MAX 4096
4661 +
4662 #endif /* _XS_WIRE_H */
4663 diff --git a/init/do_mounts.c b/init/do_mounts.c
4664 index 0f6e1d9..db6e5ee 100644
4665 --- a/init/do_mounts.c
4666 +++ b/init/do_mounts.c
4667 @@ -398,15 +398,42 @@ out:
4668 }
4669
4670 #ifdef CONFIG_ROOT_NFS
4671 +
4672 +#define NFSROOT_TIMEOUT_MIN 5
4673 +#define NFSROOT_TIMEOUT_MAX 30
4674 +#define NFSROOT_RETRY_MAX 5
4675 +
4676 static int __init mount_nfs_root(void)
4677 {
4678 char *root_dev, *root_data;
4679 + unsigned int timeout;
4680 + int try, err;
4681
4682 - if (nfs_root_data(&root_dev, &root_data) != 0)
4683 - return 0;
4684 - if (do_mount_root(root_dev, "nfs", root_mountflags, root_data) != 0)
4685 + err = nfs_root_data(&root_dev, &root_data);
4686 + if (err != 0)
4687 return 0;
4688 - return 1;
4689 +
4690 + /*
4691 + * The server or network may not be ready, so try several
4692 + * times. Stop after a few tries in case the client wants
4693 + * to fall back to other boot methods.
4694 + */
4695 + timeout = NFSROOT_TIMEOUT_MIN;
4696 + for (try = 1; ; try++) {
4697 + err = do_mount_root(root_dev, "nfs",
4698 + root_mountflags, root_data);
4699 + if (err == 0)
4700 + return 1;
4701 + if (try > NFSROOT_RETRY_MAX)
4702 + break;
4703 +
4704 + /* Wait, in case the server refused us immediately */
4705 + ssleep(timeout);
4706 + timeout <<= 1;
4707 + if (timeout > NFSROOT_TIMEOUT_MAX)
4708 + timeout = NFSROOT_TIMEOUT_MAX;
4709 + }
4710 + return 0;
4711 }
4712 #endif
4713
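
The retry loop added above backs off between attempts: start at 5 seconds, double after each failure, cap at 30 seconds, and give up after five retries so the client can fall back to other boot methods. A minimal userspace sketch of the same backoff shape; try_mount() and the constants are illustrative stand-ins, not kernel API:

/* Toy version of the capped exponential backoff above; try_mount()
 * stands in for do_mount_root() and always fails here. */
#include <stdio.h>
#include <unistd.h>

#define TIMEOUT_MIN 5
#define TIMEOUT_MAX 30
#define RETRY_MAX   5

static int try_mount(void) { return -1; /* pretend the server is down */ }

int main(void)
{
    unsigned int timeout = TIMEOUT_MIN;
    int try;

    for (try = 1; ; try++) {
        if (try_mount() == 0)
            return 0;               /* mounted */
        if (try > RETRY_MAX)
            break;                  /* give up, try other boot methods */
        printf("attempt %d failed, sleeping %us\n", try, timeout);
        sleep(timeout);
        timeout <<= 1;              /* back off: 5, 10, 20, 30, 30 */
        if (timeout > TIMEOUT_MAX)
            timeout = TIMEOUT_MAX;
    }
    return 1;
}

With the fake mount always failing, this prints the 5/10/20/30/30 second sleep schedule before giving up.
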
4714 diff --git a/ipc/shm.c b/ipc/shm.c
4715 index 02ecf2c..b76be5b 100644
4716 --- a/ipc/shm.c
4717 +++ b/ipc/shm.c
4718 @@ -870,9 +870,7 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
4719 case SHM_LOCK:
4720 case SHM_UNLOCK:
4721 {
4722 - struct file *uninitialized_var(shm_file);
4723 -
4724 - lru_add_drain_all(); /* drain pagevecs to lru lists */
4725 + struct file *shm_file;
4726
4727 shp = shm_lock_check(ns, shmid);
4728 if (IS_ERR(shp)) {
4729 @@ -895,22 +893,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
4730 err = security_shm_shmctl(shp, cmd);
4731 if (err)
4732 goto out_unlock;
4733 -
4734 - if(cmd==SHM_LOCK) {
4735 +
4736 + shm_file = shp->shm_file;
4737 + if (is_file_hugepages(shm_file))
4738 + goto out_unlock;
4739 +
4740 + if (cmd == SHM_LOCK) {
4741 struct user_struct *user = current_user();
4742 - if (!is_file_hugepages(shp->shm_file)) {
4743 - err = shmem_lock(shp->shm_file, 1, user);
4744 - if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
4745 - shp->shm_perm.mode |= SHM_LOCKED;
4746 - shp->mlock_user = user;
4747 - }
4748 + err = shmem_lock(shm_file, 1, user);
4749 + if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
4750 + shp->shm_perm.mode |= SHM_LOCKED;
4751 + shp->mlock_user = user;
4752 }
4753 - } else if (!is_file_hugepages(shp->shm_file)) {
4754 - shmem_lock(shp->shm_file, 0, shp->mlock_user);
4755 - shp->shm_perm.mode &= ~SHM_LOCKED;
4756 - shp->mlock_user = NULL;
4757 + goto out_unlock;
4758 }
4759 +
4760 + /* SHM_UNLOCK */
4761 + if (!(shp->shm_perm.mode & SHM_LOCKED))
4762 + goto out_unlock;
4763 + shmem_lock(shm_file, 0, shp->mlock_user);
4764 + shp->shm_perm.mode &= ~SHM_LOCKED;
4765 + shp->mlock_user = NULL;
4766 + get_file(shm_file);
4767 shm_unlock(shp);
4768 + shmem_unlock_mapping(shm_file->f_mapping);
4769 + fput(shm_file);
4770 goto out;
4771 }
4772 case IPC_RMID:
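
The rework above takes a reference on shm_file with get_file() before dropping the IPC lock, so the potentially slow shmem_unlock_mapping() pass can run unlocked, with fput() dropping the pin afterwards. A rough userspace sketch of that pin-unlock-work-put pattern; the object, lock, and refcount are toy stand-ins, not kernel primitives (single-threaded here, where the kernel uses atomic file counts):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
    int refcount;                 /* toy refcount, not atomic */
    pthread_mutex_t lock;
};

static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }

static void obj_put(struct obj *o)
{
    if (--o->refcount == 0) {
        printf("freeing object\n");
        free(o);
    }
}

static void slow_work(struct obj *o) { printf("long scan on %p\n", (void *)o); }

int main(void)
{
    struct obj *o = calloc(1, sizeof(*o));
    pthread_mutex_init(&o->lock, NULL);
    o->refcount = 1;

    pthread_mutex_lock(&o->lock);
    obj_get(o);                     /* pin: like get_file(shm_file) */
    pthread_mutex_unlock(&o->lock); /* like shm_unlock(shp) */

    slow_work(o);                   /* like shmem_unlock_mapping(...) */
    obj_put(o);                     /* like fput(shm_file) */
    obj_put(o);                     /* drop the original reference */
    return 0;
}
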
4773 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
4774 index e5d8464..52fd049 100644
4775 --- a/kernel/kprobes.c
4776 +++ b/kernel/kprobes.c
4777 @@ -1077,6 +1077,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
4778 /* Early boot. kretprobe_table_locks not yet initialized. */
4779 return;
4780
4781 + INIT_HLIST_HEAD(&empty_rp);
4782 hash = hash_ptr(tk, KPROBE_HASH_BITS);
4783 head = &kretprobe_inst_table[hash];
4784 kretprobe_table_lock(hash, &flags);
4785 @@ -1085,7 +1086,6 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
4786 recycle_rp_inst(ri, &empty_rp);
4787 }
4788 kretprobe_table_unlock(hash, &flags);
4789 - INIT_HLIST_HEAD(&empty_rp);
4790 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
4791 hlist_del(&ri->hlist);
4792 kfree(ri);
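
The fix above moves INIT_HLIST_HEAD(&empty_rp) before the table walk: recycle_rp_inst() may add instances to empty_rp inside the loop, and re-initializing the head afterwards would orphan them just before they are freed. A toy C list showing why the head must be initialized before population; plain pointers, not the kernel hlist API:

#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *next; };
struct list { struct node *first; };

static void list_init(struct list *l) { l->first = NULL; }

static void list_add(struct list *l, int v)
{
    struct node *n = malloc(sizeof(*n));
    n->v = v;
    n->next = l->first;
    l->first = n;
}

int main(void)
{
    struct list l;

    list_init(&l);          /* correct: init first, like the moved INIT_HLIST_HEAD */
    list_add(&l, 1);
    list_add(&l, 2);
    /* list_init(&l); */    /* the old ordering: would lose (and leak) both nodes */

    for (struct node *n = l.first; n; n = n->next)
        printf("%d\n", n->v);
    return 0;
}
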
4793 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
4794 index b1e8943..25b4f4d 100644
4795 --- a/kernel/trace/ftrace.c
4796 +++ b/kernel/trace/ftrace.c
4797 @@ -948,7 +948,7 @@ struct ftrace_func_probe {
4798 };
4799
4800 enum {
4801 - FTRACE_ENABLE_CALLS = (1 << 0),
4802 + FTRACE_UPDATE_CALLS = (1 << 0),
4803 FTRACE_DISABLE_CALLS = (1 << 1),
4804 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
4805 FTRACE_START_FUNC_RET = (1 << 3),
4806 @@ -1519,7 +1519,7 @@ int ftrace_text_reserved(void *start, void *end)
4807
4808
4809 static int
4810 -__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
4811 +__ftrace_replace_code(struct dyn_ftrace *rec, int update)
4812 {
4813 unsigned long ftrace_addr;
4814 unsigned long flag = 0UL;
4815 @@ -1527,17 +1527,17 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
4816 ftrace_addr = (unsigned long)FTRACE_ADDR;
4817
4818 /*
4819 - * If we are enabling tracing:
4820 + * If we are updating calls:
4821 *
4822 * If the record has a ref count, then we need to enable it
4823 * because someone is using it.
4824 *
4825 * Otherwise we make sure its disabled.
4826 *
4827 - * If we are disabling tracing, then disable all records that
4828 + * If we are disabling calls, then disable all records that
4829 * are enabled.
4830 */
4831 - if (enable && (rec->flags & ~FTRACE_FL_MASK))
4832 + if (update && (rec->flags & ~FTRACE_FL_MASK))
4833 flag = FTRACE_FL_ENABLED;
4834
4835 /* If the state of this record hasn't changed, then do nothing */
4836 @@ -1553,7 +1553,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
4837 return ftrace_make_nop(NULL, rec, ftrace_addr);
4838 }
4839
4840 -static void ftrace_replace_code(int enable)
4841 +static void ftrace_replace_code(int update)
4842 {
4843 struct dyn_ftrace *rec;
4844 struct ftrace_page *pg;
4845 @@ -1567,7 +1567,7 @@ static void ftrace_replace_code(int enable)
4846 if (rec->flags & FTRACE_FL_FREE)
4847 continue;
4848
4849 - failed = __ftrace_replace_code(rec, enable);
4850 + failed = __ftrace_replace_code(rec, update);
4851 if (failed) {
4852 ftrace_bug(failed, rec->ip);
4853 /* Stop processing */
4854 @@ -1623,7 +1623,7 @@ static int __ftrace_modify_code(void *data)
4855 */
4856 function_trace_stop++;
4857
4858 - if (*command & FTRACE_ENABLE_CALLS)
4859 + if (*command & FTRACE_UPDATE_CALLS)
4860 ftrace_replace_code(1);
4861 else if (*command & FTRACE_DISABLE_CALLS)
4862 ftrace_replace_code(0);
4863 @@ -1691,7 +1691,7 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
4864 return -ENODEV;
4865
4866 ftrace_start_up++;
4867 - command |= FTRACE_ENABLE_CALLS;
4868 + command |= FTRACE_UPDATE_CALLS;
4869
4870 /* ops marked global share the filter hashes */
4871 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
4872 @@ -1743,8 +1743,7 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
4873 if (ops != &global_ops || !global_start_up)
4874 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
4875
4876 - if (!ftrace_start_up)
4877 - command |= FTRACE_DISABLE_CALLS;
4878 + command |= FTRACE_UPDATE_CALLS;
4879
4880 if (saved_ftrace_func != ftrace_trace_function) {
4881 saved_ftrace_func = ftrace_trace_function;
4882 @@ -1766,7 +1765,7 @@ static void ftrace_startup_sysctl(void)
4883 saved_ftrace_func = NULL;
4884 /* ftrace_start_up is true if we want ftrace running */
4885 if (ftrace_start_up)
4886 - ftrace_run_update_code(FTRACE_ENABLE_CALLS);
4887 + ftrace_run_update_code(FTRACE_UPDATE_CALLS);
4888 }
4889
4890 static void ftrace_shutdown_sysctl(void)
4891 @@ -2919,7 +2918,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4892 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4893 if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
4894 && ftrace_enabled)
4895 - ftrace_run_update_code(FTRACE_ENABLE_CALLS);
4896 + ftrace_run_update_code(FTRACE_UPDATE_CALLS);
4897
4898 mutex_unlock(&ftrace_lock);
4899
4900 @@ -3107,7 +3106,7 @@ ftrace_regex_release(struct inode *inode, struct file *file)
4901 orig_hash, iter->hash);
4902 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
4903 && ftrace_enabled)
4904 - ftrace_run_update_code(FTRACE_ENABLE_CALLS);
4905 + ftrace_run_update_code(FTRACE_UPDATE_CALLS);
4906
4907 mutex_unlock(&ftrace_lock);
4908 }
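
The rename from FTRACE_ENABLE_CALLS to FTRACE_UPDATE_CALLS reflects what ftrace_replace_code() actually does: reconcile each record against its ref count, enabling referenced records and disabling the rest, rather than switching everything one way. A small sketch of that per-record decision; the flag layout is a stand-in, not the real dyn_ftrace encoding:

#include <stdio.h>

#define FL_ENABLED (1u << 31)
#define FL_MASK    (FL_ENABLED)      /* bits that are not ref counts */

static const char *reconcile(unsigned int *flags, int update)
{
    unsigned int want = 0;

    if (update && (*flags & ~FL_MASK))   /* someone references this record */
        want = FL_ENABLED;

    if ((*flags & FL_ENABLED) == want)   /* state unchanged: do nothing */
        return "nothing to do";

    *flags ^= FL_ENABLED;
    return want ? "patch call in" : "patch nop in";
}

int main(void)
{
    unsigned int used = 2, unused = 0, stale = FL_ENABLED;

    printf("%s\n", reconcile(&used, 1));    /* patch call in */
    printf("%s\n", reconcile(&unused, 1));  /* nothing to do */
    printf("%s\n", reconcile(&stale, 1));   /* patch nop in */
    return 0;
}
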
4909 diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
4910 index db110b8..f1539de 100644
4911 --- a/kernel/tracepoint.c
4912 +++ b/kernel/tracepoint.c
4913 @@ -634,10 +634,11 @@ static int tracepoint_module_coming(struct module *mod)
4914 int ret = 0;
4915
4916 /*
4917 - * We skip modules that tain the kernel, especially those with different
4918 - * module header (for forced load), to make sure we don't cause a crash.
4919 + * We skip modules that taint the kernel, especially those with different
4920 + * module headers (for forced load), to make sure we don't cause a crash.
4921 + * Staging and out-of-tree GPL modules are fine.
4922 */
4923 - if (mod->taints)
4924 + if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
4925 return 0;
4926 mutex_lock(&tracepoints_mutex);
4927 tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
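
The tightened check above tolerates TAINT_OOT_MODULE and TAINT_CRAP but still skips modules with any other taint bit, e.g. a forced load. A sketch of the mask test; the bit positions here are stand-ins for the kernel's TAINT_* numbering:

#include <stdio.h>

#define TAINT_PROPRIETARY (1u << 0)
#define TAINT_FORCED_LOAD (1u << 1)
#define TAINT_CRAP        (1u << 2)   /* staging driver */
#define TAINT_OOT_MODULE  (1u << 3)   /* out-of-tree, but GPL */

#define TOLERATED (TAINT_OOT_MODULE | TAINT_CRAP)

static int coming_ok(unsigned int taints)
{
    /* reject if any bit outside the tolerated set is on */
    return !(taints & ~TOLERATED);
}

int main(void)
{
    printf("%d\n", coming_ok(0));                                /* 1 */
    printf("%d\n", coming_ok(TAINT_OOT_MODULE | TAINT_CRAP));    /* 1 */
    printf("%d\n", coming_ok(TAINT_FORCED_LOAD));                /* 0 */
    return 0;
}
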
4928 diff --git a/mm/filemap.c b/mm/filemap.c
4929 index 5f0a3c9..90286a4 100644
4930 --- a/mm/filemap.c
4931 +++ b/mm/filemap.c
4932 @@ -393,24 +393,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_range);
4933 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
4934 {
4935 int error;
4936 - struct mem_cgroup *memcg = NULL;
4937
4938 VM_BUG_ON(!PageLocked(old));
4939 VM_BUG_ON(!PageLocked(new));
4940 VM_BUG_ON(new->mapping);
4941
4942 - /*
4943 - * This is not page migration, but prepare_migration and
4944 - * end_migration does enough work for charge replacement.
4945 - *
4946 - * In the longer term we probably want a specialized function
4947 - * for moving the charge from old to new in a more efficient
4948 - * manner.
4949 - */
4950 - error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
4951 - if (error)
4952 - return error;
4953 -
4954 error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
4955 if (!error) {
4956 struct address_space *mapping = old->mapping;
4957 @@ -432,13 +419,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
4958 if (PageSwapBacked(new))
4959 __inc_zone_page_state(new, NR_SHMEM);
4960 spin_unlock_irq(&mapping->tree_lock);
4961 + /* mem_cgroup code must not be called under tree_lock */
4962 + mem_cgroup_replace_page_cache(old, new);
4963 radix_tree_preload_end();
4964 if (freepage)
4965 freepage(old);
4966 page_cache_release(old);
4967 - mem_cgroup_end_migration(memcg, old, new, true);
4968 - } else {
4969 - mem_cgroup_end_migration(memcg, old, new, false);
4970 }
4971
4972 return error;
4973 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4974 index b63f5f7..f538e9b 100644
4975 --- a/mm/memcontrol.c
4976 +++ b/mm/memcontrol.c
4977 @@ -3366,6 +3366,50 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
4978 cgroup_release_and_wakeup_rmdir(&memcg->css);
4979 }
4980
4981 +/*
4982 + * At replace page cache, newpage is not under any memcg but it's on
4983 + * LRU. So, this function doesn't touch res_counter but handles LRU
4984 + * in correct way. Both pages are locked so we cannot race with uncharge.
4985 + */
4986 +void mem_cgroup_replace_page_cache(struct page *oldpage,
4987 + struct page *newpage)
4988 +{
4989 + struct mem_cgroup *memcg;
4990 + struct page_cgroup *pc;
4991 + struct zone *zone;
4992 + enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
4993 + unsigned long flags;
4994 +
4995 + if (mem_cgroup_disabled())
4996 + return;
4997 +
4998 + pc = lookup_page_cgroup(oldpage);
4999 + /* fix accounting on old pages */
5000 + lock_page_cgroup(pc);
5001 + memcg = pc->mem_cgroup;
5002 + mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
5003 + ClearPageCgroupUsed(pc);
5004 + unlock_page_cgroup(pc);
5005 +
5006 + if (PageSwapBacked(oldpage))
5007 + type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
5008 +
5009 + zone = page_zone(newpage);
5010 + pc = lookup_page_cgroup(newpage);
5011 + /*
5012 + * Even if newpage->mapping was NULL before starting replacement,
5013 + * the newpage may be on the LRU (or a pagevec for the LRU) already. We lock
5014 + * LRU while we overwrite pc->mem_cgroup.
5015 + */
5016 + spin_lock_irqsave(&zone->lru_lock, flags);
5017 + if (PageLRU(newpage))
5018 + del_page_from_lru_list(zone, newpage, page_lru(newpage));
5019 + __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
5020 + if (PageLRU(newpage))
5021 + add_page_to_lru_list(zone, newpage, page_lru(newpage));
5022 + spin_unlock_irqrestore(&zone->lru_lock, flags);
5023 +}
5024 +
5025 #ifdef CONFIG_DEBUG_VM
5026 static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
5027 {
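
mem_cgroup_replace_page_cache() above ends by fixing the new page's LRU placement: because the list a page belongs on depends on pc->mem_cgroup, the page is pulled off the LRU, the owner field is rewritten, and it is re-added, all under zone->lru_lock. A toy version of that remove-mutate-reinsert step; plain lists and a pthread mutex, not kernel API:

#include <pthread.h>
#include <stdio.h>

struct item { int owner; struct item *next; };

static struct item *groups[2];
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void del(struct item *it)          /* unlink from its owner's list */
{
    struct item **p = &groups[it->owner];
    while (*p != it)
        p = &(*p)->next;
    *p = it->next;
}

static void add(struct item *it)          /* push onto its owner's list */
{
    it->next = groups[it->owner];
    groups[it->owner] = it;
}

int main(void)
{
    struct item a = { .owner = 0, .next = NULL };

    add(&a);

    pthread_mutex_lock(&lock);   /* like zone->lru_lock */
    del(&a);                     /* del_page_from_lru_list() */
    a.owner = 1;                 /* overwrite pc->mem_cgroup */
    add(&a);                     /* add_page_to_lru_list() */
    pthread_mutex_unlock(&lock);

    printf("group1 head owner: %d\n", groups[1]->owner);
    return 0;
}
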
5028 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
5029 index 2b8ba3a..485be89 100644
5030 --- a/mm/page_alloc.c
5031 +++ b/mm/page_alloc.c
5032 @@ -5608,6 +5608,17 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
5033 bool is_pageblock_removable_nolock(struct page *page)
5034 {
5035 struct zone *zone = page_zone(page);
5036 + unsigned long pfn = page_to_pfn(page);
5037 +
5038 + /*
5039 + * We have to be careful here because we are iterating over memory
5040 + * sections which are not zone aware so we might end up outside of
5041 + * sections which are not zone aware, so we might end up outside of
5042 + */
5043 + if (!zone || zone->zone_start_pfn > pfn ||
5044 + zone->zone_start_pfn + zone->spanned_pages <= pfn)
5045 + return false;
5046 +
5047 return __count_immobile_pages(zone, page, 0);
5048 }
5049
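
The guard added above rejects a pfn that lies inside a memory section but outside the zone's span, since sections are not zone aware. A sketch of the range test; the struct fields are stand-ins for the kernel's zone layout:

#include <stdbool.h>
#include <stdio.h>

struct zone { unsigned long start_pfn, spanned_pages; };

static bool pfn_in_zone(const struct zone *z, unsigned long pfn)
{
    if (!z)
        return false;
    /* valid range is [start_pfn, start_pfn + spanned_pages) */
    return pfn >= z->start_pfn && pfn < z->start_pfn + z->spanned_pages;
}

int main(void)
{
    struct zone z = { .start_pfn = 0x1000, .spanned_pages = 0x100 };

    printf("%d\n", pfn_in_zone(&z, 0x10ff));  /* 1: last pfn of the zone */
    printf("%d\n", pfn_in_zone(&z, 0x1100));  /* 0: in the section, past the zone */
    return 0;
}
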
5050 diff --git a/mm/shmem.c b/mm/shmem.c
5051 index d672250..6c253f7 100644
5052 --- a/mm/shmem.c
5053 +++ b/mm/shmem.c
5054 @@ -379,7 +379,7 @@ static int shmem_free_swap(struct address_space *mapping,
5055 /*
5056 * Pagevec may contain swap entries, so shuffle up pages before releasing.
5057 */
5058 -static void shmem_pagevec_release(struct pagevec *pvec)
5059 +static void shmem_deswap_pagevec(struct pagevec *pvec)
5060 {
5061 int i, j;
5062
5063 @@ -389,7 +389,36 @@ static void shmem_pagevec_release(struct pagevec *pvec)
5064 pvec->pages[j++] = page;
5065 }
5066 pvec->nr = j;
5067 - pagevec_release(pvec);
5068 +}
5069 +
5070 +/*
5071 + * SysV IPC SHM_UNLOCK restores unevictable pages to their evictable lists.
5072 + */
5073 +void shmem_unlock_mapping(struct address_space *mapping)
5074 +{
5075 + struct pagevec pvec;
5076 + pgoff_t indices[PAGEVEC_SIZE];
5077 + pgoff_t index = 0;
5078 +
5079 + pagevec_init(&pvec, 0);
5080 + /*
5081 + * Minor point, but we might as well stop if someone else SHM_LOCKs it.
5082 + */
5083 + while (!mapping_unevictable(mapping)) {
5084 + /*
5085 + * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
5086 + * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
5087 + */
5088 + pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
5089 + PAGEVEC_SIZE, pvec.pages, indices);
5090 + if (!pvec.nr)
5091 + break;
5092 + index = indices[pvec.nr - 1] + 1;
5093 + shmem_deswap_pagevec(&pvec);
5094 + check_move_unevictable_pages(pvec.pages, pvec.nr);
5095 + pagevec_release(&pvec);
5096 + cond_resched();
5097 + }
5098 }
5099
5100 /*
5101 @@ -440,7 +469,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5102 }
5103 unlock_page(page);
5104 }
5105 - shmem_pagevec_release(&pvec);
5106 + shmem_deswap_pagevec(&pvec);
5107 + pagevec_release(&pvec);
5108 mem_cgroup_uncharge_end();
5109 cond_resched();
5110 index++;
5111 @@ -470,7 +500,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5112 continue;
5113 }
5114 if (index == start && indices[0] > end) {
5115 - shmem_pagevec_release(&pvec);
5116 + shmem_deswap_pagevec(&pvec);
5117 + pagevec_release(&pvec);
5118 break;
5119 }
5120 mem_cgroup_uncharge_start();
5121 @@ -494,7 +525,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5122 }
5123 unlock_page(page);
5124 }
5125 - shmem_pagevec_release(&pvec);
5126 + shmem_deswap_pagevec(&pvec);
5127 + pagevec_release(&pvec);
5128 mem_cgroup_uncharge_end();
5129 index++;
5130 }
5131 @@ -1068,13 +1100,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
5132 user_shm_unlock(inode->i_size, user);
5133 info->flags &= ~VM_LOCKED;
5134 mapping_clear_unevictable(file->f_mapping);
5135 - /*
5136 - * Ensure that a racing putback_lru_page() can see
5137 - * the pages of this mapping are evictable when we
5138 - * skip them due to !PageLRU during the scan.
5139 - */
5140 - smp_mb__after_clear_bit();
5141 - scan_mapping_unevictable_pages(file->f_mapping);
5142 }
5143 retval = 0;
5144
5145 @@ -2446,6 +2471,10 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
5146 return 0;
5147 }
5148
5149 +void shmem_unlock_mapping(struct address_space *mapping)
5150 +{
5151 +}
5152 +
5153 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5154 {
5155 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
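
shmem_unlock_mapping() above scans the mapping in PAGEVEC_SIZE batches, resumes after the last index each batch returned, and stops on an empty batch. A sketch of that resumable batch loop over a toy backing store:

#include <stdio.h>

#define BATCH 4
#define NENTRIES 10

/* toy lookup: fill up to max indices starting at start */
static int lookup(unsigned long start, unsigned long *out, int max)
{
    int n = 0;
    for (unsigned long i = start; i < NENTRIES && n < max; i++)
        out[n++] = i;
    return n;
}

int main(void)
{
    unsigned long indices[BATCH];
    unsigned long index = 0;
    int nr;

    while ((nr = lookup(index, indices, BATCH)) != 0) {
        printf("batch of %d ending at %lu\n", nr, indices[nr - 1]);
        index = indices[nr - 1] + 1;   /* resume point, like the kernel loop */
        /* process batch; in the kernel: deswap + check_move + release */
    }
    return 0;
}
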
5156 diff --git a/mm/slub.c b/mm/slub.c
5157 index ed3334d..1a919f0 100644
5158 --- a/mm/slub.c
5159 +++ b/mm/slub.c
5160 @@ -2166,6 +2166,11 @@ redo:
5161 goto new_slab;
5162 }
5163
5164 + /* must check c->freelist again in case of cpu migration or IRQ */
5165 + object = c->freelist;
5166 + if (object)
5167 + goto load_freelist;
5168 +
5169 stat(s, ALLOC_SLOWPATH);
5170
5171 do {
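
The hunk above re-reads c->freelist after entering the slow path, since an interrupt or a cpu migration may have refilled it in the window after the first check. A sketch of the recheck shape; this toy is single-threaded, so the comments mark where the state could really change:

#include <stdio.h>

static void *freelist;               /* stands in for c->freelist */

static void *slow_alloc(void)
{
    printf("slow path taken\n");
    return 0;                        /* elided: allocate a new slab */
}

static void *alloc(void)
{
    void *object = freelist;
    if (object)
        goto load_freelist;

    /* ...slow-path entry; an IRQ could refill freelist right here... */
    object = freelist;               /* must check freelist again */
    if (object)
        goto load_freelist;

    return slow_alloc();

load_freelist:
    freelist = 0;                    /* pop: toy single-slot freelist */
    return object;
}

int main(void)
{
    static int slot;
    freelist = &slot;
    printf("%p\n", alloc());         /* fast path */
    printf("%p\n", alloc());         /* slow path */
    return 0;
}
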
5172 diff --git a/mm/vmscan.c b/mm/vmscan.c
5173 index f54a05b..cb33d9c 100644
5174 --- a/mm/vmscan.c
5175 +++ b/mm/vmscan.c
5176 @@ -636,7 +636,7 @@ redo:
5177 * When racing with an mlock or AS_UNEVICTABLE clearing
5178 * (page is unlocked) make sure that if the other thread
5179 * does not observe our setting of PG_lru and fails
5180 - * isolation/check_move_unevictable_page,
5181 + * isolation/check_move_unevictable_pages,
5182 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
5183 * the page back to the evictable list.
5184 *
5185 @@ -3353,97 +3353,59 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
5186 return 1;
5187 }
5188
5189 +#ifdef CONFIG_SHMEM
5190 /**
5191 - * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
5192 - * @page: page to check evictability and move to appropriate lru list
5193 - * @zone: zone page is in
5194 + * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
5195 + * @pages: array of pages to check
5196 + * @nr_pages: number of pages to check
5197 *
5198 - * Checks a page for evictability and moves the page to the appropriate
5199 - * zone lru list.
5200 + * Checks pages for evictability and moves them to the appropriate lru list.
5201 *
5202 - * Restrictions: zone->lru_lock must be held, page must be on LRU and must
5203 - * have PageUnevictable set.
5204 + * This function is only used for SysV IPC SHM_UNLOCK.
5205 */
5206 -static void check_move_unevictable_page(struct page *page, struct zone *zone)
5207 +void check_move_unevictable_pages(struct page **pages, int nr_pages)
5208 {
5209 - VM_BUG_ON(PageActive(page));
5210 -
5211 -retry:
5212 - ClearPageUnevictable(page);
5213 - if (page_evictable(page, NULL)) {
5214 - enum lru_list l = page_lru_base_type(page);
5215 + struct zone *zone = NULL;
5216 + int pgscanned = 0;
5217 + int pgrescued = 0;
5218 + int i;
5219
5220 - __dec_zone_state(zone, NR_UNEVICTABLE);
5221 - list_move(&page->lru, &zone->lru[l].list);
5222 - mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
5223 - __inc_zone_state(zone, NR_INACTIVE_ANON + l);
5224 - __count_vm_event(UNEVICTABLE_PGRESCUED);
5225 - } else {
5226 - /*
5227 - * rotate unevictable list
5228 - */
5229 - SetPageUnevictable(page);
5230 - list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
5231 - mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
5232 - if (page_evictable(page, NULL))
5233 - goto retry;
5234 - }
5235 -}
5236 + for (i = 0; i < nr_pages; i++) {
5237 + struct page *page = pages[i];
5238 + struct zone *pagezone;
5239
5240 -/**
5241 - * scan_mapping_unevictable_pages - scan an address space for evictable pages
5242 - * @mapping: struct address_space to scan for evictable pages
5243 - *
5244 - * Scan all pages in mapping. Check unevictable pages for
5245 - * evictability and move them to the appropriate zone lru list.
5246 - */
5247 -void scan_mapping_unevictable_pages(struct address_space *mapping)
5248 -{
5249 - pgoff_t next = 0;
5250 - pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
5251 - PAGE_CACHE_SHIFT;
5252 - struct zone *zone;
5253 - struct pagevec pvec;
5254 + pgscanned++;
5255 + pagezone = page_zone(page);
5256 + if (pagezone != zone) {
5257 + if (zone)
5258 + spin_unlock_irq(&zone->lru_lock);
5259 + zone = pagezone;
5260 + spin_lock_irq(&zone->lru_lock);
5261 + }
5262
5263 - if (mapping->nrpages == 0)
5264 - return;
5265 + if (!PageLRU(page) || !PageUnevictable(page))
5266 + continue;
5267
5268 - pagevec_init(&pvec, 0);
5269 - while (next < end &&
5270 - pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
5271 - int i;
5272 - int pg_scanned = 0;
5273 -
5274 - zone = NULL;
5275 -
5276 - for (i = 0; i < pagevec_count(&pvec); i++) {
5277 - struct page *page = pvec.pages[i];
5278 - pgoff_t page_index = page->index;
5279 - struct zone *pagezone = page_zone(page);
5280 -
5281 - pg_scanned++;
5282 - if (page_index > next)
5283 - next = page_index;
5284 - next++;
5285 -
5286 - if (pagezone != zone) {
5287 - if (zone)
5288 - spin_unlock_irq(&zone->lru_lock);
5289 - zone = pagezone;
5290 - spin_lock_irq(&zone->lru_lock);
5291 - }
5292 + if (page_evictable(page, NULL)) {
5293 + enum lru_list lru = page_lru_base_type(page);
5294
5295 - if (PageLRU(page) && PageUnevictable(page))
5296 - check_move_unevictable_page(page, zone);
5297 + VM_BUG_ON(PageActive(page));
5298 + ClearPageUnevictable(page);
5299 + __dec_zone_state(zone, NR_UNEVICTABLE);
5300 + list_move(&page->lru, &zone->lru[lru].list);
5301 + mem_cgroup_move_lists(page, LRU_UNEVICTABLE, lru);
5302 + __inc_zone_state(zone, NR_INACTIVE_ANON + lru);
5303 + pgrescued++;
5304 }
5305 - if (zone)
5306 - spin_unlock_irq(&zone->lru_lock);
5307 - pagevec_release(&pvec);
5308 -
5309 - count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
5310 }
5311
5312 + if (zone) {
5313 + __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
5314 + __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
5315 + spin_unlock_irq(&zone->lru_lock);
5316 + }
5317 }
5318 +#endif /* CONFIG_SHMEM */
5319
5320 static void warn_scan_unevictable_pages(void)
5321 {
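
The rewritten check_move_unevictable_pages() batches zone->lru_lock: it holds the current zone's lock across consecutive same-zone pages and only swaps locks when the zone changes. A sketch of that batching, with printing standing in for the real lock and unlock:

#include <stdio.h>

struct zone { const char *name; };

static void lock(struct zone *z)   { printf("lock   %s\n", z->name); }
static void unlock(struct zone *z) { printf("unlock %s\n", z->name); }

int main(void)
{
    struct zone a = { "zone A" }, b = { "zone B" };
    struct zone *pages[] = { &a, &a, &b, &b, &b, &a };
    struct zone *cur = 0;

    for (unsigned i = 0; i < sizeof(pages) / sizeof(pages[0]); i++) {
        if (pages[i] != cur) {       /* zone changed: swap locks */
            if (cur)
                unlock(cur);
            cur = pages[i];
            lock(cur);
        }
        /* process pages[i] under cur's lock */
    }
    if (cur)
        unlock(cur);
    return 0;
}
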
5322 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
5323 index ea10a51..73495f1 100644
5324 --- a/net/mac80211/ieee80211_i.h
5325 +++ b/net/mac80211/ieee80211_i.h
5326 @@ -702,6 +702,8 @@ struct tpt_led_trigger {
5327 * well be on the operating channel
5328 * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
5329 * determine if we are on the operating channel or not
5330 + * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning,
5331 + * only gets set in conjunction with SCAN_SW_SCANNING
5332 * @SCAN_COMPLETED: Set for our scan work function when the driver reported
5333 * that the scan completed.
5334 * @SCAN_ABORTED: Set for our scan work function when the driver reported
5335 @@ -710,6 +712,7 @@ struct tpt_led_trigger {
5336 enum {
5337 SCAN_SW_SCANNING,
5338 SCAN_HW_SCANNING,
5339 + SCAN_OFF_CHANNEL,
5340 SCAN_COMPLETED,
5341 SCAN_ABORTED,
5342 };
5343 @@ -1140,14 +1143,10 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
5344 void ieee80211_sched_scan_stopped_work(struct work_struct *work);
5345
5346 /* off-channel helpers */
5347 -bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
5348 -void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
5349 - bool tell_ap);
5350 -void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
5351 - bool offchannel_ps_enable);
5352 +void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
5353 +void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
5354 void ieee80211_offchannel_return(struct ieee80211_local *local,
5355 - bool enable_beaconing,
5356 - bool offchannel_ps_disable);
5357 + bool enable_beaconing);
5358 void ieee80211_hw_roc_setup(struct ieee80211_local *local);
5359
5360 /* interface handling */
5361 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
5362 index cae4435..a7536fd 100644
5363 --- a/net/mac80211/main.c
5364 +++ b/net/mac80211/main.c
5365 @@ -92,47 +92,6 @@ static void ieee80211_reconfig_filter(struct work_struct *work)
5366 ieee80211_configure_filter(local);
5367 }
5368
5369 -/*
5370 - * Returns true if we are logically configured to be on
5371 - * the operating channel AND the hardware-conf is currently
5372 - * configured on the operating channel. Compares channel-type
5373 - * as well.
5374 - */
5375 -bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
5376 -{
5377 - struct ieee80211_channel *chan, *scan_chan;
5378 - enum nl80211_channel_type channel_type;
5379 -
5380 - /* This logic needs to match logic in ieee80211_hw_config */
5381 - if (local->scan_channel) {
5382 - chan = local->scan_channel;
5383 - /* If scanning on oper channel, use whatever channel-type
5384 - * is currently in use.
5385 - */
5386 - if (chan == local->oper_channel)
5387 - channel_type = local->_oper_channel_type;
5388 - else
5389 - channel_type = NL80211_CHAN_NO_HT;
5390 - } else if (local->tmp_channel) {
5391 - chan = scan_chan = local->tmp_channel;
5392 - channel_type = local->tmp_channel_type;
5393 - } else {
5394 - chan = local->oper_channel;
5395 - channel_type = local->_oper_channel_type;
5396 - }
5397 -
5398 - if (chan != local->oper_channel ||
5399 - channel_type != local->_oper_channel_type)
5400 - return false;
5401 -
5402 - /* Check current hardware-config against oper_channel. */
5403 - if ((local->oper_channel != local->hw.conf.channel) ||
5404 - (local->_oper_channel_type != local->hw.conf.channel_type))
5405 - return false;
5406 -
5407 - return true;
5408 -}
5409 -
5410 int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
5411 {
5412 struct ieee80211_channel *chan, *scan_chan;
5413 @@ -145,9 +104,6 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
5414
5415 scan_chan = local->scan_channel;
5416
5417 - /* If this off-channel logic ever changes, ieee80211_on_oper_channel
5418 - * may need to change as well.
5419 - */
5420 offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
5421 if (scan_chan) {
5422 chan = scan_chan;
5423 @@ -158,19 +114,17 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
5424 channel_type = local->_oper_channel_type;
5425 else
5426 channel_type = NL80211_CHAN_NO_HT;
5427 - } else if (local->tmp_channel) {
5428 + local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
5429 + } else if (local->tmp_channel &&
5430 + local->oper_channel != local->tmp_channel) {
5431 chan = scan_chan = local->tmp_channel;
5432 channel_type = local->tmp_channel_type;
5433 + local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
5434 } else {
5435 chan = local->oper_channel;
5436 channel_type = local->_oper_channel_type;
5437 - }
5438 -
5439 - if (chan != local->oper_channel ||
5440 - channel_type != local->_oper_channel_type)
5441 - local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
5442 - else
5443 local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
5444 + }
5445
5446 offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
5447
5448 @@ -279,7 +233,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
5449
5450 if (changed & BSS_CHANGED_BEACON_ENABLED) {
5451 if (local->quiescing || !ieee80211_sdata_running(sdata) ||
5452 - test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) {
5453 + test_bit(SCAN_SW_SCANNING, &local->scanning)) {
5454 sdata->vif.bss_conf.enable_beacon = false;
5455 } else {
5456 /*
5457 diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
5458 index 3d41441..1b239be 100644
5459 --- a/net/mac80211/offchannel.c
5460 +++ b/net/mac80211/offchannel.c
5461 @@ -18,14 +18,10 @@
5462 #include "driver-trace.h"
5463
5464 /*
5465 - * Tell our hardware to disable PS.
5466 - * Optionally inform AP that we will go to sleep so that it will buffer
5467 - * the frames while we are doing off-channel work. This is optional
5468 - * because we *may* be doing work on-operating channel, and want our
5469 - * hardware unconditionally awake, but still let the AP send us normal frames.
5470 + * inform the AP that we will go to sleep so that it will buffer the frames
5471 + * while we scan
5472 */
5473 -static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
5474 - bool tell_ap)
5475 +static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
5476 {
5477 struct ieee80211_local *local = sdata->local;
5478 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
5479 @@ -46,8 +42,8 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
5480 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
5481 }
5482
5483 - if (tell_ap && (!local->offchannel_ps_enabled ||
5484 - !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)))
5485 + if (!(local->offchannel_ps_enabled) ||
5486 + !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
5487 /*
5488 * If power save was enabled, no need to send a nullfunc
5489 * frame because AP knows that we are sleeping. But if the
5490 @@ -82,9 +78,6 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
5491 * we are sleeping, let's just enable power save mode in
5492 * hardware.
5493 */
5494 - /* TODO: Only set hardware if CONF_PS changed?
5495 - * TODO: Should we set offchannel_ps_enabled to false?
5496 - */
5497 local->hw.conf.flags |= IEEE80211_CONF_PS;
5498 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
5499 } else if (local->hw.conf.dynamic_ps_timeout > 0) {
5500 @@ -103,61 +96,63 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
5501 ieee80211_sta_reset_conn_monitor(sdata);
5502 }
5503
5504 -void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
5505 - bool offchannel_ps_enable)
5506 +void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
5507 {
5508 struct ieee80211_sub_if_data *sdata;
5509
5510 - /*
5511 - * notify the AP about us leaving the channel and stop all
5512 - * STA interfaces.
5513 - */
5514 mutex_lock(&local->iflist_mtx);
5515 list_for_each_entry(sdata, &local->interfaces, list) {
5516 if (!ieee80211_sdata_running(sdata))
5517 continue;
5518
5519 - if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
5520 - set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
5521 -
5522 - /* Check to see if we should disable beaconing. */
5523 + /* disable beaconing */
5524 if (sdata->vif.type == NL80211_IFTYPE_AP ||
5525 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
5526 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
5527 ieee80211_bss_info_change_notify(
5528 sdata, BSS_CHANGED_BEACON_ENABLED);
5529
5530 - if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
5531 + /*
5532 + * only handle non-STA interfaces here, STA interfaces
5533 + * are handled in ieee80211_offchannel_stop_station(),
5534 + * e.g., from the background scan state machine.
5535 + *
5536 + * In addition, do not stop monitor interface to allow it to be
5537 + * used from user space controlled off-channel operations.
5538 + */
5539 + if (sdata->vif.type != NL80211_IFTYPE_STATION &&
5540 + sdata->vif.type != NL80211_IFTYPE_MONITOR) {
5541 + set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
5542 netif_tx_stop_all_queues(sdata->dev);
5543 - if (offchannel_ps_enable &&
5544 - (sdata->vif.type == NL80211_IFTYPE_STATION) &&
5545 - sdata->u.mgd.associated)
5546 - ieee80211_offchannel_ps_enable(sdata, true);
5547 }
5548 }
5549 mutex_unlock(&local->iflist_mtx);
5550 }
5551
5552 -void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
5553 - bool tell_ap)
5554 +void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
5555 {
5556 struct ieee80211_sub_if_data *sdata;
5557
5558 + /*
5559 + * notify the AP about us leaving the channel and stop all STA interfaces
5560 + */
5561 mutex_lock(&local->iflist_mtx);
5562 list_for_each_entry(sdata, &local->interfaces, list) {
5563 if (!ieee80211_sdata_running(sdata))
5564 continue;
5565
5566 - if (sdata->vif.type == NL80211_IFTYPE_STATION &&
5567 - sdata->u.mgd.associated)
5568 - ieee80211_offchannel_ps_enable(sdata, tell_ap);
5569 + if (sdata->vif.type == NL80211_IFTYPE_STATION) {
5570 + set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
5571 + netif_tx_stop_all_queues(sdata->dev);
5572 + if (sdata->u.mgd.associated)
5573 + ieee80211_offchannel_ps_enable(sdata);
5574 + }
5575 }
5576 mutex_unlock(&local->iflist_mtx);
5577 }
5578
5579 void ieee80211_offchannel_return(struct ieee80211_local *local,
5580 - bool enable_beaconing,
5581 - bool offchannel_ps_disable)
5582 + bool enable_beaconing)
5583 {
5584 struct ieee80211_sub_if_data *sdata;
5585
5586 @@ -167,8 +162,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
5587 continue;
5588
5589 /* Tell AP we're back */
5590 - if (offchannel_ps_disable &&
5591 - sdata->vif.type == NL80211_IFTYPE_STATION) {
5592 + if (sdata->vif.type == NL80211_IFTYPE_STATION) {
5593 if (sdata->u.mgd.associated)
5594 ieee80211_offchannel_ps_disable(sdata);
5595 }
5596 @@ -188,7 +182,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
5597 netif_tx_wake_all_queues(sdata->dev);
5598 }
5599
5600 - /* Check to see if we should re-enable beaconing */
5601 + /* re-enable beaconing */
5602 if (enable_beaconing &&
5603 (sdata->vif.type == NL80211_IFTYPE_AP ||
5604 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
5605 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
5606 index fb123e2..5c51607 100644
5607 --- a/net/mac80211/rx.c
5608 +++ b/net/mac80211/rx.c
5609 @@ -421,10 +421,16 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
5610 return RX_CONTINUE;
5611
5612 if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
5613 - test_bit(SCAN_SW_SCANNING, &local->scanning) ||
5614 local->sched_scanning)
5615 return ieee80211_scan_rx(rx->sdata, skb);
5616
5617 + if (test_bit(SCAN_SW_SCANNING, &local->scanning)) {
5618 + /* drop all the other packets during a software scan anyway */
5619 + if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
5620 + dev_kfree_skb(skb);
5621 + return RX_QUEUED;
5622 + }
5623 +
5624 /* scanning finished during invoking of handlers */
5625 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
5626 return RX_DROP_UNUSABLE;
5627 @@ -2858,7 +2864,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
5628 local->dot11ReceivedFragmentCount++;
5629
5630 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
5631 - test_bit(SCAN_SW_SCANNING, &local->scanning)))
5632 + test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
5633 status->rx_flags |= IEEE80211_RX_IN_SCAN;
5634
5635 if (ieee80211_is_mgmt(fc))
5636 diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
5637 index 105436d..5279300 100644
5638 --- a/net/mac80211/scan.c
5639 +++ b/net/mac80211/scan.c
5640 @@ -213,14 +213,6 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
5641 if (bss)
5642 ieee80211_rx_bss_put(sdata->local, bss);
5643
5644 - /* If we are on-operating-channel, and this packet is for the
5645 - * current channel, pass the pkt on up the stack so that
5646 - * the rest of the stack can make use of it.
5647 - */
5648 - if (ieee80211_cfg_on_oper_channel(sdata->local)
5649 - && (channel == sdata->local->oper_channel))
5650 - return RX_CONTINUE;
5651 -
5652 dev_kfree_skb(skb);
5653 return RX_QUEUED;
5654 }
5655 @@ -264,8 +256,6 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
5656 bool was_hw_scan)
5657 {
5658 struct ieee80211_local *local = hw_to_local(hw);
5659 - bool on_oper_chan;
5660 - bool enable_beacons = false;
5661
5662 lockdep_assert_held(&local->mtx);
5663
5664 @@ -298,25 +288,11 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
5665 local->scanning = 0;
5666 local->scan_channel = NULL;
5667
5668 - on_oper_chan = ieee80211_cfg_on_oper_channel(local);
5669 -
5670 - if (was_hw_scan || !on_oper_chan)
5671 - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
5672 - else
5673 - /* Set power back to normal operating levels. */
5674 - ieee80211_hw_config(local, 0);
5675 -
5676 + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
5677 if (!was_hw_scan) {
5678 - bool on_oper_chan2;
5679 ieee80211_configure_filter(local);
5680 drv_sw_scan_complete(local);
5681 - on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
5682 - /* We should always be on-channel at this point. */
5683 - WARN_ON(!on_oper_chan2);
5684 - if (on_oper_chan2 && (on_oper_chan != on_oper_chan2))
5685 - enable_beacons = true;
5686 -
5687 - ieee80211_offchannel_return(local, enable_beacons, true);
5688 + ieee80211_offchannel_return(local, true);
5689 }
5690
5691 ieee80211_recalc_idle(local);
5692 @@ -357,15 +333,13 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
5693 */
5694 drv_sw_scan_start(local);
5695
5696 + ieee80211_offchannel_stop_beaconing(local);
5697 +
5698 local->leave_oper_channel_time = 0;
5699 local->next_scan_state = SCAN_DECISION;
5700 local->scan_channel_idx = 0;
5701
5702 - /* We always want to use off-channel PS, even if we
5703 - * are not really leaving oper-channel. Don't
5704 - * tell the AP though, as long as we are on-channel.
5705 - */
5706 - ieee80211_offchannel_enable_all_ps(local, false);
5707 + drv_flush(local, false);
5708
5709 ieee80211_configure_filter(local);
5710
5711 @@ -508,20 +482,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
5712 }
5713 mutex_unlock(&local->iflist_mtx);
5714
5715 - next_chan = local->scan_req->channels[local->scan_channel_idx];
5716 -
5717 - if (ieee80211_cfg_on_oper_channel(local)) {
5718 - /* We're currently on operating channel. */
5719 - if (next_chan == local->oper_channel)
5720 - /* We don't need to move off of operating channel. */
5721 - local->next_scan_state = SCAN_SET_CHANNEL;
5722 - else
5723 - /*
5724 - * We do need to leave operating channel, as next
5725 - * scan is somewhere else.
5726 - */
5727 - local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
5728 - } else {
5729 + if (local->scan_channel) {
5730 /*
5731 * we're currently scanning a different channel, let's
5732 * see if we can scan another channel without interfering
5733 @@ -537,6 +498,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
5734 *
5735 * Otherwise switch back to the operating channel.
5736 */
5737 + next_chan = local->scan_req->channels[local->scan_channel_idx];
5738
5739 bad_latency = time_after(jiffies +
5740 ieee80211_scan_get_channel_time(next_chan),
5741 @@ -554,6 +516,12 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
5742 local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
5743 else
5744 local->next_scan_state = SCAN_SET_CHANNEL;
5745 + } else {
5746 + /*
5747 + * we're on the operating channel currently, let's
5748 + * leave that channel now to scan another one
5749 + */
5750 + local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
5751 }
5752
5753 *next_delay = 0;
5754 @@ -562,10 +530,9 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
5755 static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
5756 unsigned long *next_delay)
5757 {
5758 - /* PS will already be in off-channel mode,
5759 - * we do that once at the beginning of scanning.
5760 - */
5761 - ieee80211_offchannel_stop_vifs(local, false);
5762 + ieee80211_offchannel_stop_station(local);
5763 +
5764 + __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
5765
5766 /*
5767 * What if the nullfunc frames didn't arrive?
5768 @@ -588,15 +555,15 @@ static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *loca
5769 {
5770 /* switch back to the operating channel */
5771 local->scan_channel = NULL;
5772 - if (!ieee80211_cfg_on_oper_channel(local))
5773 - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
5774 + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
5775
5776 /*
5777 - * Re-enable vifs and beaconing. Leave PS
5778 - * in off-channel state..will put that back
5779 - * on-channel at the end of scanning.
5780 + * Only re-enable station mode interface now; beaconing will be
5781 + * re-enabled once the full scan has been completed.
5782 */
5783 - ieee80211_offchannel_return(local, true, false);
5784 + ieee80211_offchannel_return(local, false);
5785 +
5786 + __clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
5787
5788 *next_delay = HZ / 5;
5789 local->next_scan_state = SCAN_DECISION;
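
The series of mac80211 hunks above reverts the on-operating-channel special cases back to a single SCAN_OFF_CHANNEL bit: set when leaving the operating channel, cleared on return, and consulted by the RX path and by the TX path in the next hunk. A sketch of that flag discipline; plain bit ops, not the kernel's atomic bitops:

#include <stdio.h>

enum { SCAN_SW_SCANNING, SCAN_OFF_CHANNEL };
static unsigned long scanning;

static void set_bit_(int b)   { scanning |=  (1ul << b); }
static void clear_bit_(int b) { scanning &= ~(1ul << b); }
static int  test_bit_(int b)  { return !!(scanning & (1ul << b)); }

static void tx_frame(void)
{
    if (test_bit_(SCAN_OFF_CHANNEL))
        printf("tx: dropped (off channel)\n");
    else
        printf("tx: sent\n");
}

int main(void)
{
    set_bit_(SCAN_SW_SCANNING);

    set_bit_(SCAN_OFF_CHANNEL);   /* like scan_state_leave_oper_channel */
    tx_frame();
    clear_bit_(SCAN_OFF_CHANNEL); /* like scan_state_enter_oper_channel */
    tx_frame();
    return 0;
}
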
5790 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
5791 index 1f8b120..eff1f4e 100644
5792 --- a/net/mac80211/tx.c
5793 +++ b/net/mac80211/tx.c
5794 @@ -259,8 +259,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
5795 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
5796 return TX_CONTINUE;
5797
5798 - if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
5799 - test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
5800 + if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) &&
5801 !ieee80211_is_probe_req(hdr->frame_control) &&
5802 !ieee80211_is_nullfunc(hdr->frame_control))
5803 /*
5804 diff --git a/net/mac80211/work.c b/net/mac80211/work.c
5805 index 6c53b6d..99165ef 100644
5806 --- a/net/mac80211/work.c
5807 +++ b/net/mac80211/work.c
5808 @@ -899,26 +899,6 @@ static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct,
5809 return false;
5810 }
5811
5812 -static enum nl80211_channel_type
5813 -ieee80211_calc_ct(enum nl80211_channel_type wk_ct,
5814 - enum nl80211_channel_type oper_ct)
5815 -{
5816 - switch (wk_ct) {
5817 - case NL80211_CHAN_NO_HT:
5818 - return oper_ct;
5819 - case NL80211_CHAN_HT20:
5820 - if (oper_ct != NL80211_CHAN_NO_HT)
5821 - return oper_ct;
5822 - return wk_ct;
5823 - case NL80211_CHAN_HT40MINUS:
5824 - case NL80211_CHAN_HT40PLUS:
5825 - return wk_ct;
5826 - }
5827 - WARN_ON(1); /* shouldn't get here */
5828 - return wk_ct;
5829 -}
5830 -
5831 -
5832 static void ieee80211_work_timer(unsigned long data)
5833 {
5834 struct ieee80211_local *local = (void *) data;
5835 @@ -969,52 +949,18 @@ static void ieee80211_work_work(struct work_struct *work)
5836 }
5837
5838 if (!started && !local->tmp_channel) {
5839 - bool on_oper_chan;
5840 - bool tmp_chan_changed = false;
5841 - bool on_oper_chan2;
5842 - enum nl80211_channel_type wk_ct;
5843 - on_oper_chan = ieee80211_cfg_on_oper_channel(local);
5844 -
5845 - /* Work with existing channel type if possible. */
5846 - wk_ct = wk->chan_type;
5847 - if (wk->chan == local->hw.conf.channel)
5848 - wk_ct = ieee80211_calc_ct(wk->chan_type,
5849 - local->hw.conf.channel_type);
5850 -
5851 - if (local->tmp_channel)
5852 - if ((local->tmp_channel != wk->chan) ||
5853 - (local->tmp_channel_type != wk_ct))
5854 - tmp_chan_changed = true;
5855 -
5856 - local->tmp_channel = wk->chan;
5857 - local->tmp_channel_type = wk_ct;
5858 /*
5859 - * Leave the station vifs in awake mode if they
5860 - * happen to be on the same channel as
5861 - * the requested channel.
5862 + * TODO: could optimize this by leaving the
5863 + * station vifs in awake mode if they
5864 + * happen to be on the same channel as
5865 + * the requested channel
5866 */
5867 - on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
5868 - if (on_oper_chan != on_oper_chan2) {
5869 - if (on_oper_chan2) {
5870 - /* going off oper channel, PS too */
5871 - ieee80211_offchannel_stop_vifs(local,
5872 - true);
5873 - ieee80211_hw_config(local, 0);
5874 - } else {
5875 - /* going on channel, but leave PS
5876 - * off-channel. */
5877 - ieee80211_hw_config(local, 0);
5878 - ieee80211_offchannel_return(local,
5879 - true,
5880 - false);
5881 - }
5882 - } else if (tmp_chan_changed)
5883 - /* Still off-channel, but on some other
5884 - * channel, so update hardware.
5885 - * PS should already be off-channel.
5886 - */
5887 - ieee80211_hw_config(local, 0);
5888 + ieee80211_offchannel_stop_beaconing(local);
5889 + ieee80211_offchannel_stop_station(local);
5890
5891 + local->tmp_channel = wk->chan;
5892 + local->tmp_channel_type = wk->chan_type;
5893 + ieee80211_hw_config(local, 0);
5894 started = true;
5895 wk->timeout = jiffies;
5896 }
5897 @@ -1100,8 +1046,7 @@ static void ieee80211_work_work(struct work_struct *work)
5898 * we still need to do a hardware config. Currently,
5899 * we cannot be here while scanning, however.
5900 */
5901 - if (!ieee80211_cfg_on_oper_channel(local))
5902 - ieee80211_hw_config(local, 0);
5903 + ieee80211_hw_config(local, 0);
5904
5905 /* At the least, we need to disable offchannel_ps,
5906 * so just go ahead and run the entire offchannel
5907 @@ -1109,7 +1054,7 @@ static void ieee80211_work_work(struct work_struct *work)
5908 * beaconing if we were already on-oper-channel
5909 * as a future optimization.
5910 */
5911 - ieee80211_offchannel_return(local, true, true);
5912 + ieee80211_offchannel_return(local, true);
5913
5914 /* give connection some time to breathe */
5915 run_again(local, jiffies + HZ/2);
5916 diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
5917 index f614ce7..28a39bb 100644
5918 --- a/net/mac80211/wpa.c
5919 +++ b/net/mac80211/wpa.c
5920 @@ -106,7 +106,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
5921 if (status->flag & RX_FLAG_MMIC_ERROR)
5922 goto mic_fail;
5923
5924 - if (!(status->flag & RX_FLAG_IV_STRIPPED))
5925 + if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
5926 goto update_iv;
5927
5928 return RX_CONTINUE;
5929 diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
5930 index 6e03888..d4ad50e 100644
5931 --- a/net/sunrpc/svc.c
5932 +++ b/net/sunrpc/svc.c
5933 @@ -167,6 +167,7 @@ svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
5934
5935 fail_free:
5936 kfree(m->to_pool);
5937 + m->to_pool = NULL;
5938 fail:
5939 return -ENOMEM;
5940 }
5941 @@ -287,7 +288,9 @@ svc_pool_map_put(void)
5942 if (!--m->count) {
5943 m->mode = SVC_POOL_DEFAULT;
5944 kfree(m->to_pool);
5945 + m->to_pool = NULL;
5946 kfree(m->pool_to);
5947 + m->pool_to = NULL;
5948 m->npools = 0;
5949 }
5950
5951 @@ -527,17 +530,20 @@ svc_destroy(struct svc_serv *serv)
5952 printk("svc_destroy: no threads for serv=%p!\n", serv);
5953
5954 del_timer_sync(&serv->sv_temptimer);
5955 -
5956 - svc_close_all(&serv->sv_tempsocks);
5957 + /*
5958 + * The set of xprts (contained in the sv_tempsocks and
5959 + * sv_permsocks lists) is now constant, since it is modified
5960 + * only by accepting new sockets (done by service threads in
5961 + * svc_recv) or aging old ones (done by sv_temptimer), or
5962 + * configuration changes (excluded by whatever locking the
5963 + * caller is using--nfsd_mutex in the case of nfsd). So it's
5964 + * safe to traverse those lists and shut everything down:
5965 + */
5966 + svc_close_all(serv);
5967
5968 if (serv->sv_shutdown)
5969 serv->sv_shutdown(serv);
5970
5971 - svc_close_all(&serv->sv_permsocks);
5972 -
5973 - BUG_ON(!list_empty(&serv->sv_permsocks));
5974 - BUG_ON(!list_empty(&serv->sv_tempsocks));
5975 -
5976 cache_clean_deferred(serv);
5977
5978 if (svc_serv_is_pooled(serv))
5979 diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
5980 index 447cd0e..9ed2cd0 100644
5981 --- a/net/sunrpc/svc_xprt.c
5982 +++ b/net/sunrpc/svc_xprt.c
5983 @@ -893,14 +893,7 @@ void svc_delete_xprt(struct svc_xprt *xprt)
5984 spin_lock_bh(&serv->sv_lock);
5985 if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
5986 list_del_init(&xprt->xpt_list);
5987 - /*
5988 - * The only time we're called while xpt_ready is still on a list
5989 - * is while the list itself is about to be destroyed (in
5990 - * svc_destroy). BUT svc_xprt_enqueue could still be attempting
5991 - * to add new entries to the sp_sockets list, so we can't leave
5992 - * a freed xprt on it.
5993 - */
5994 - list_del_init(&xprt->xpt_ready);
5995 + BUG_ON(!list_empty(&xprt->xpt_ready));
5996 if (test_bit(XPT_TEMP, &xprt->xpt_flags))
5997 serv->sv_tmpcnt--;
5998 spin_unlock_bh(&serv->sv_lock);
5999 @@ -928,22 +921,48 @@ void svc_close_xprt(struct svc_xprt *xprt)
6000 }
6001 EXPORT_SYMBOL_GPL(svc_close_xprt);
6002
6003 -void svc_close_all(struct list_head *xprt_list)
6004 +static void svc_close_list(struct list_head *xprt_list)
6005 +{
6006 + struct svc_xprt *xprt;
6007 +
6008 + list_for_each_entry(xprt, xprt_list, xpt_list) {
6009 + set_bit(XPT_CLOSE, &xprt->xpt_flags);
6010 + set_bit(XPT_BUSY, &xprt->xpt_flags);
6011 + }
6012 +}
6013 +
6014 +void svc_close_all(struct svc_serv *serv)
6015 {
6016 + struct svc_pool *pool;
6017 struct svc_xprt *xprt;
6018 struct svc_xprt *tmp;
6019 + int i;
6020 +
6021 + svc_close_list(&serv->sv_tempsocks);
6022 + svc_close_list(&serv->sv_permsocks);
6023
6024 + for (i = 0; i < serv->sv_nrpools; i++) {
6025 + pool = &serv->sv_pools[i];
6026 +
6027 + spin_lock_bh(&pool->sp_lock);
6028 + while (!list_empty(&pool->sp_sockets)) {
6029 + xprt = list_first_entry(&pool->sp_sockets, struct svc_xprt, xpt_ready);
6030 + list_del_init(&xprt->xpt_ready);
6031 + }
6032 + spin_unlock_bh(&pool->sp_lock);
6033 + }
6034 /*
6035 - * The server is shutting down, and no more threads are running.
6036 - * svc_xprt_enqueue() might still be running, but at worst it
6037 - * will re-add the xprt to sp_sockets, which will soon get
6038 - * freed. So we don't bother with any more locking, and don't
6039 - * leave the close to the (nonexistent) server threads:
6040 + * At this point the sp_sockets lists will stay empty, since
6041 + * svc_xprt_enqueue will not add new entries without taking the
6042 + * sp_lock and checking XPT_BUSY.
6043 */
6044 - list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
6045 - set_bit(XPT_CLOSE, &xprt->xpt_flags);
6046 + list_for_each_entry_safe(xprt, tmp, &serv->sv_tempsocks, xpt_list)
6047 svc_delete_xprt(xprt);
6048 - }
6049 + list_for_each_entry_safe(xprt, tmp, &serv->sv_permsocks, xpt_list)
6050 + svc_delete_xprt(xprt);
6051 +
6052 + BUG_ON(!list_empty(&serv->sv_permsocks));
6053 + BUG_ON(!list_empty(&serv->sv_tempsocks));
6054 }
6055
6056 /*
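
svc_close_all() above shuts down in phases: mark every transport XPT_CLOSE|XPT_BUSY so svc_xprt_enqueue() cannot re-queue it, drain each pool's sp_sockets list under sp_lock, then delete the transports. A toy sketch of that mark-drain-delete ordering; no real locking, single-threaded:

#include <stdio.h>

#define XPT_BUSY  1u
#define XPT_CLOSE 2u

struct xprt { const char *name; unsigned flags; int queued; };

int main(void)
{
    struct xprt xs[] = { { "tcp:2049", 0, 1 }, { "udp:2049", 0, 0 } };
    unsigned i, n = sizeof(xs) / sizeof(xs[0]);

    for (i = 0; i < n; i++)               /* phase 1: mark */
        xs[i].flags |= XPT_BUSY | XPT_CLOSE;

    for (i = 0; i < n; i++)               /* phase 2: drain ready queues */
        xs[i].queued = 0;                 /* nothing re-queues: XPT_BUSY is set */

    for (i = 0; i < n; i++)               /* phase 3: delete */
        printf("deleting %s (flags %#x)\n", xs[i].name, xs[i].flags);
    return 0;
}
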
6057 diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
6058 index 277ebd4..593f4c6 100644
6059 --- a/net/sunrpc/xdr.c
6060 +++ b/net/sunrpc/xdr.c
6061 @@ -296,7 +296,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
6062 * Copies data into an arbitrary memory location from an array of pages
6063 * The copy is assumed to be non-overlapping.
6064 */
6065 -static void
6066 +void
6067 _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
6068 {
6069 struct page **pgfrom;
6070 @@ -324,6 +324,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
6071
6072 } while ((len -= copy) != 0);
6073 }
6074 +EXPORT_SYMBOL_GPL(_copy_from_pages);
6075
6076 /*
6077 * xdr_shrink_bufhead
6078 diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
6079 index ec7afce..bccf07d 100644
6080 --- a/scripts/kconfig/streamline_config.pl
6081 +++ b/scripts/kconfig/streamline_config.pl
6082 @@ -250,33 +250,61 @@ if ($kconfig) {
6083 read_kconfig($kconfig);
6084 }
6085
6086 +sub convert_vars {
6087 + my ($line, %vars) = @_;
6088 +
6089 + my $process = "";
6090 +
6091 + while ($line =~ s/^(.*?)(\$\((.*?)\))//) {
6092 + my $start = $1;
6093 + my $variable = $2;
6094 + my $var = $3;
6095 +
6096 + if (defined($vars{$var})) {
6097 + $process .= $start . $vars{$var};
6098 + } else {
6099 + $process .= $start . $variable;
6100 + }
6101 + }
6102 +
6103 + $process .= $line;
6104 +
6105 + return $process;
6106 +}
6107 +
6108 # Read all Makefiles to map the configs to the objects
6109 foreach my $makefile (@makefiles) {
6110
6111 - my $cont = 0;
6112 + my $line = "";
6113 + my %make_vars;
6114
6115 open(MIN,$makefile) || die "Can't open $makefile";
6116 while (<MIN>) {
6117 + # if this line ends with a backslash, continue
6118 + chomp;
6119 + if (/^(.*)\\$/) {
6120 + $line .= $1;
6121 + next;
6122 + }
6123 +
6124 + $line .= $_;
6125 + $_ = $line;
6126 + $line = "";
6127 +
6128 my $objs;
6129
6130 - # is this a line after a line with a backslash?
6131 - if ($cont && /(\S.*)$/) {
6132 - $objs = $1;
6133 - }
6134 - $cont = 0;
6135 + $_ = convert_vars($_, %make_vars);
6136
6137 # collect objects after obj-$(CONFIG_FOO_BAR)
6138 if (/obj-\$\((CONFIG_[^\)]*)\)\s*[+:]?=\s*(.*)/) {
6139 $var = $1;
6140 $objs = $2;
6141 +
6142 + # check if variables are set
6143 + } elsif (/^\s*(\S+)\s*[:]?=\s*(.*\S)/) {
6144 + $make_vars{$1} = $2;
6145 }
6146 if (defined($objs)) {
6147 - # test if the line ends with a backslash
6148 - if ($objs =~ m,(.*)\\$,) {
6149 - $objs = $1;
6150 - $cont = 1;
6151 - }
6152 -
6153 foreach my $obj (split /\s+/,$objs) {
6154 $obj =~ s/-/_/g;
6155 if ($obj =~ /(.*)\.o$/) {
6156 diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
6157 index f40a6af6..54e35c1 100644
6158 --- a/scripts/recordmcount.h
6159 +++ b/scripts/recordmcount.h
6160 @@ -462,7 +462,7 @@ __has_rel_mcount(Elf_Shdr const *const relhdr, /* is SHT_REL or SHT_RELA */
6161 succeed_file();
6162 }
6163 if (w(txthdr->sh_type) != SHT_PROGBITS ||
6164 - !(w(txthdr->sh_flags) & SHF_EXECINSTR))
6165 + !(_w(txthdr->sh_flags) & SHF_EXECINSTR))
6166 return NULL;
6167 return txtname;
6168 }
6169 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
6170 index 0d50df0..88a2788 100644
6171 --- a/security/integrity/ima/ima_api.c
6172 +++ b/security/integrity/ima/ima_api.c
6173 @@ -178,8 +178,8 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
6174 strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
6175
6176 result = ima_store_template(entry, violation, inode);
6177 - if (!result)
6178 + if (!result || result == -EEXIST)
6179 iint->flags |= IMA_MEASURED;
6180 - else
6181 + if (result < 0)
6182 kfree(entry);
6183 }
6184 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
6185 index 8e28f04..55a6271 100644
6186 --- a/security/integrity/ima/ima_queue.c
6187 +++ b/security/integrity/ima/ima_queue.c
6188 @@ -23,6 +23,8 @@
6189 #include <linux/slab.h>
6190 #include "ima.h"
6191
6192 +#define AUDIT_CAUSE_LEN_MAX 32
6193 +
6194 LIST_HEAD(ima_measurements); /* list of all measurements */
6195
6196 /* key: inode (before secure-hashing a file) */
6197 @@ -94,7 +96,8 @@ static int ima_pcr_extend(const u8 *hash)
6198
6199 result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash);
6200 if (result != 0)
6201 - pr_err("IMA: Error Communicating to TPM chip\n");
6202 + pr_err("IMA: Error Communicating to TPM chip, result: %d\n",
6203 + result);
6204 return result;
6205 }
6206
6207 @@ -106,14 +109,16 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
6208 {
6209 u8 digest[IMA_DIGEST_SIZE];
6210 const char *audit_cause = "hash_added";
6211 + char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
6212 int audit_info = 1;
6213 - int result = 0;
6214 + int result = 0, tpmresult = 0;
6215
6216 mutex_lock(&ima_extend_list_mutex);
6217 if (!violation) {
6218 memcpy(digest, entry->digest, sizeof digest);
6219 if (ima_lookup_digest_entry(digest)) {
6220 audit_cause = "hash_exists";
6221 + result = -EEXIST;
6222 goto out;
6223 }
6224 }
6225 @@ -128,9 +133,11 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
6226 if (violation) /* invalidate pcr */
6227 memset(digest, 0xff, sizeof digest);
6228
6229 - result = ima_pcr_extend(digest);
6230 - if (result != 0) {
6231 - audit_cause = "TPM error";
6232 + tpmresult = ima_pcr_extend(digest);
6233 + if (tpmresult != 0) {
6234 + snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
6235 + tpmresult);
6236 + audit_cause = tpm_audit_cause;
6237 audit_info = 0;
6238 }
6239 out:
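
Two behaviour changes in the ima_queue.c hunks above: a duplicate digest now reports -EEXIST to the caller instead of silently succeeding, and a TPM extend failure records the numeric result in the audit cause rather than a bare "TPM error" string. A sketch of the bounded cause-string formatting (tpm_audit_cause() is a hypothetical helper; the kernel formats into a stack buffer within the same function):

    #include <stdio.h>

    #define AUDIT_CAUSE_LEN_MAX 32

    /* The cause string carries the TPM result code; the buffer
     * lives in the caller so the returned pointer stays valid. */
    static const char *tpm_audit_cause(int tpmresult, char *buf)
    {
        if (tpmresult == 0)
            return "hash_added";
        snprintf(buf, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)", tpmresult);
        return buf;
    }

    int main(void)
    {
        char buf[AUDIT_CAUSE_LEN_MAX];

        printf("%s\n", tpm_audit_cause(-62, buf)); /* TPM_error(-62) */
        return 0;
    }
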
6240 diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
6241 index 4a9b4b2..867558c 100644
6242 --- a/security/tomoyo/util.c
6243 +++ b/security/tomoyo/util.c
6244 @@ -492,13 +492,13 @@ static bool tomoyo_correct_word2(const char *string, size_t len)
6245 if (d < '0' || d > '7' || e < '0' || e > '7')
6246 break;
6247 c = tomoyo_make_byte(c, d, e);
6248 - if (tomoyo_invalid(c))
6249 - continue; /* pattern is not \000 */
6250 + if (c <= ' ' || c >= 127)
6251 + continue;
6252 }
6253 goto out;
6254 } else if (in_repetition && c == '/') {
6255 goto out;
6256 - } else if (tomoyo_invalid(c)) {
6257 + } else if (c <= ' ' || c >= 127) {
6258 goto out;
6259 }
6260 }
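
The open-coded range check replaces tomoyo_invalid(), which (as defined in 3.2, reproduced below from memory as an assumption) additionally requires the byte to be non-zero; through the helper, an escaped \000 in a \ooo pattern was rejected as part of a correct word. A sketch contrasting the two predicates:

    #include <stdbool.h>
    #include <stdio.h>

    /* assumed 3.2 definition of tomoyo_invalid(): note the extra
     * non-zero guard in front of the range test */
    static bool tomoyo_invalid(const unsigned char c)
    {
        return c && (c <= ' ' || c >= 127);
    }

    /* the open-coded test from the hunk above */
    static bool range_check(const unsigned char c)
    {
        return c <= ' ' || c >= 127;
    }

    int main(void)
    {
        /* for an escaped \000 byte the two predicates disagree */
        printf("c=0: tomoyo_invalid=%d range_check=%d\n",
               tomoyo_invalid(0), range_check(0));
        return 0;
    }
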
6261 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
6262 index c2f79e6..5b2b75b 100644
6263 --- a/sound/pci/hda/hda_intel.c
6264 +++ b/sound/pci/hda/hda_intel.c
6265 @@ -2509,6 +2509,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
6266 SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
6267 SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
6268 SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
6269 + SND_PCI_QUIRK(0x10de, 0xcb89, "Macbook Pro 7,1", POS_FIX_LPIB),
6270 SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
6271 SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
6272 SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
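
The new entry pins the MacBook Pro 7,1 (NVIDIA MCP89 HD-audio, PCI SSID 10de:cb89) to LPIB-based position reporting, matched by subsystem IDs like its neighbours in the table. A standalone sketch of that kind of SSID lookup (field names and the POS_FIX_LPIB value are illustrative, not the driver's actual structures):

    #include <stdio.h>

    #define POS_FIX_LPIB 1   /* illustrative value */

    struct ssid_quirk {
        unsigned short subvendor, subdevice;
        const char *name;
        int value;
    };

    static const struct ssid_quirk quirks[] = {
        { 0x10de, 0xcb89, "Macbook Pro 7,1", POS_FIX_LPIB },
        { 0, 0, NULL, 0 }                    /* terminator */
    };

    static int quirk_lookup(unsigned short sv, unsigned short sd)
    {
        const struct ssid_quirk *q;

        for (q = quirks; q->name; q++)
            if (q->subvendor == sv && q->subdevice == sd)
                return q->value;
        return -1;                           /* no quirk: autodetect */
    }

    int main(void)
    {
        printf("%d\n", quirk_lookup(0x10de, 0xcb89));  /* 1 */
        return 0;
    }
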
6273 diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
6274 index 618ddad..368f0c5 100644
6275 --- a/sound/pci/hda/hda_local.h
6276 +++ b/sound/pci/hda/hda_local.h
6277 @@ -487,7 +487,12 @@ static inline u32 get_wcaps(struct hda_codec *codec, hda_nid_t nid)
6278 }
6279
6280 /* get the widget type from widget capability bits */
6281 -#define get_wcaps_type(wcaps) (((wcaps) & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT)
6282 +static inline int get_wcaps_type(unsigned int wcaps)
6283 +{
6284 + if (!wcaps)
6285 + return -1; /* invalid type */
6286 + return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
6287 +}
6288
6289 static inline unsigned int get_wcaps_channels(u32 wcaps)
6290 {
6291 diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
6292 index 2c981b5..254ab52 100644
6293 --- a/sound/pci/hda/hda_proc.c
6294 +++ b/sound/pci/hda/hda_proc.c
6295 @@ -54,6 +54,8 @@ static const char *get_wid_type_name(unsigned int wid_value)
6296 [AC_WID_BEEP] = "Beep Generator Widget",
6297 [AC_WID_VENDOR] = "Vendor Defined Widget",
6298 };
6299 + if (wid_value == -1)
6300 + return "UNKNOWN Widget";
6301 wid_value &= 0xf;
6302 if (names[wid_value])
6303 return names[wid_value];
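
Together, the two hda hunks above make a zero widget-capability word (an absent or unreadable widget) decode to -1 instead of type 0, which the proc dumper would otherwise have printed as "Audio Output"; hda_proc.c then maps -1 to "UNKNOWN Widget" before indexing the name table. A self-contained sketch (AC_WCAP_TYPE and its shift match hda_codec.h):

    #include <stdio.h>

    /* widget-capability type field, as in hda_codec.h */
    #define AC_WCAP_TYPE        (0xf << 20)
    #define AC_WCAP_TYPE_SHIFT  20

    /* the new inline: zero caps become -1 instead of type 0 */
    static int get_wcaps_type(unsigned int wcaps)
    {
        if (!wcaps)
            return -1;               /* invalid widget */
        return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
    }

    int main(void)
    {
        int type = get_wcaps_type(0);

        printf("%s\n", type == -1 ? "UNKNOWN Widget"
                                  : "indexable widget type");
        return 0;
    }
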
6304 diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
6305 index 70a7abd..5b0a9bb 100644
6306 --- a/sound/pci/hda/patch_cirrus.c
6307 +++ b/sound/pci/hda/patch_cirrus.c
6308 @@ -920,16 +920,14 @@ static void cs_automute(struct hda_codec *codec)
6309
6310 /* mute speakers if spdif or hp jack is plugged in */
6311 for (i = 0; i < cfg->speaker_outs; i++) {
6312 + int pin_ctl = hp_present ? 0 : PIN_OUT;
6313 + /* detect on spdif is specific to CS421x */
6314 + if (spdif_present && (spec->vendor_nid == CS421X_VENDOR_NID))
6315 + pin_ctl = 0;
6316 +
6317 nid = cfg->speaker_pins[i];
6318 snd_hda_codec_write(codec, nid, 0,
6319 - AC_VERB_SET_PIN_WIDGET_CONTROL,
6320 - hp_present ? 0 : PIN_OUT);
6321 - /* detect on spdif is specific to CS421x */
6322 - if (spec->vendor_nid == CS421X_VENDOR_NID) {
6323 - snd_hda_codec_write(codec, nid, 0,
6324 - AC_VERB_SET_PIN_WIDGET_CONTROL,
6325 - spdif_present ? 0 : PIN_OUT);
6326 - }
6327 + AC_VERB_SET_PIN_WIDGET_CONTROL, pin_ctl);
6328 }
6329 if (spec->gpio_eapd_hp) {
6330 unsigned int gpio = hp_present ?
6331 @@ -1771,30 +1769,19 @@ static int build_cs421x_output(struct hda_codec *codec)
6332 struct auto_pin_cfg *cfg = &spec->autocfg;
6333 struct snd_kcontrol *kctl;
6334 int err;
6335 - char *name = "HP/Speakers";
6336 + char *name = "Master";
6337
6338 fix_volume_caps(codec, dac);
6339 - if (!spec->vmaster_sw) {
6340 - err = add_vmaster(codec, dac);
6341 - if (err < 0)
6342 - return err;
6343 - }
6344
6345 err = add_mute(codec, name, 0,
6346 HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
6347 if (err < 0)
6348 return err;
6349 - err = snd_ctl_add_slave(spec->vmaster_sw, kctl);
6350 - if (err < 0)
6351 - return err;
6352
6353 err = add_volume(codec, name, 0,
6354 HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
6355 if (err < 0)
6356 return err;
6357 - err = snd_ctl_add_slave(spec->vmaster_vol, kctl);
6358 - if (err < 0)
6359 - return err;
6360
6361 if (cfg->speaker_outs) {
6362 err = snd_hda_ctl_add(codec, 0,
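
The cs_automute() hunk computes the speaker pin control once, folding the CS421x-only SPDIF mute into the same write instead of issuing a second, contradicting verb to the same pin; the build_cs421x_output() hunk renames the single output control to the standard "Master" and drops the now-redundant vmaster plumbing. A sketch of the consolidated pin-control decision (the CS421X_VENDOR_NID value here is illustrative):

    #include <stdio.h>

    #define PIN_OUT 0x40              /* AC_PINCTL_OUT_EN */
    #define CS421X_VENDOR_NID 0x0b    /* illustrative value */

    /* single computation of the speaker pin control, as in the
     * reworked cs_automute() above */
    static int speaker_pin_ctl(int hp_present, int spdif_present,
                               int vendor_nid)
    {
        int pin_ctl = hp_present ? 0 : PIN_OUT;

        /* detect on spdif is specific to CS421x */
        if (spdif_present && vendor_nid == CS421X_VENDOR_NID)
            pin_ctl = 0;
        return pin_ctl;
    }

    int main(void)
    {
        printf("%#x\n", speaker_pin_ctl(0, 0, 0));                 /* 0x40 */
        printf("%#x\n", speaker_pin_ctl(0, 1, CS421X_VENDOR_NID)); /* 0 */
        return 0;
    }
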
6363 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
6364 index 0de2119..7072251 100644
6365 --- a/sound/pci/hda/patch_conexant.c
6366 +++ b/sound/pci/hda/patch_conexant.c
6367 @@ -1120,8 +1120,6 @@ static const char * const cxt5045_models[CXT5045_MODELS] = {
6368
6369 static const struct snd_pci_quirk cxt5045_cfg_tbl[] = {
6370 SND_PCI_QUIRK(0x103c, 0x30d5, "HP 530", CXT5045_LAPTOP_HP530),
6371 - SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3000, "HP DV Series",