
Contents of /genpatches-2.6/tags/3.0-30/1017_linux-3.0.18.patch



Revision 2206
Mon Sep 17 18:58:14 2012 UTC by mpagano
File size: 112745 bytes
3.0-30 release
1 diff --git a/Makefile b/Makefile
2 index 295fbda..581b8e9 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 0
8 -SUBLEVEL = 17
9 +SUBLEVEL = 18
10 EXTRAVERSION =
11 NAME = Sneaky Weasel
12
13 diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
14 index 3be485a..f19de9f 100644
15 --- a/arch/ia64/kernel/acpi.c
16 +++ b/arch/ia64/kernel/acpi.c
17 @@ -429,22 +429,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
18 static struct acpi_table_slit __initdata *slit_table;
19 cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
20
21 -static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
22 +static int __init
23 +get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
24 {
25 int pxm;
26
27 pxm = pa->proximity_domain_lo;
28 - if (ia64_platform_is("sn2"))
29 + if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
30 pxm += pa->proximity_domain_hi[0] << 8;
31 return pxm;
32 }
33
34 -static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
35 +static int __init
36 +get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
37 {
38 int pxm;
39
40 pxm = ma->proximity_domain;
41 - if (!ia64_platform_is("sn2"))
42 + if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
43 pxm &= 0xff;
44
45 return pxm;
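
The ia64 hunk above (and the x86 srat.c hunk further down, with a wider field) only applies the high proximity-domain byte when the SRAT table reports revision 2 or higher. A minimal user-space sketch of that byte reassembly, with made-up field values (not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t proximity_domain_lo = 0x34;             /* sample low byte */
            uint8_t proximity_domain_hi[3] = {0x12, 0, 0};  /* sample high bytes */
            unsigned acpi_srat_revision = 3;                /* pretend table revision */

            unsigned pxm = proximity_domain_lo;
            if (acpi_srat_revision >= 2)       /* revision <= 1 tables only carry 8 bits */
                    pxm |= proximity_domain_hi[0] << 8;
            printf("pxm = 0x%x\n", pxm);       /* 0x1234 */
            return 0;
    }
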
46 diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
47 index 577abba..83bb960 100644
48 --- a/arch/score/kernel/entry.S
49 +++ b/arch/score/kernel/entry.S
50 @@ -408,7 +408,7 @@ ENTRY(handle_sys)
51 sw r9, [r0, PT_EPC]
52
53 cmpi.c r27, __NR_syscalls # check syscall number
54 - bgtu illegal_syscall
55 + bgeu illegal_syscall
56
57 slli r8, r27, 2 # get syscall routine
58 la r11, sys_call_table
59 diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
60 index 67f87f2..78a1eff 100644
61 --- a/arch/x86/include/asm/amd_nb.h
62 +++ b/arch/x86/include/asm/amd_nb.h
63 @@ -1,6 +1,7 @@
64 #ifndef _ASM_X86_AMD_NB_H
65 #define _ASM_X86_AMD_NB_H
66
67 +#include <linux/ioport.h>
68 #include <linux/pci.h>
69
70 struct amd_nb_bus_dev_range {
71 @@ -13,6 +14,7 @@ extern const struct pci_device_id amd_nb_misc_ids[];
72 extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
73
74 extern bool early_is_amd_nb(u32 value);
75 +extern struct resource *amd_get_mmconfig_range(struct resource *res);
76 extern int amd_cache_northbridges(void);
77 extern void amd_flush_garts(void);
78 extern int amd_numa_init(void);
79 diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
80 index 4c39baa..bae1efe 100644
81 --- a/arch/x86/kernel/amd_nb.c
82 +++ b/arch/x86/kernel/amd_nb.c
83 @@ -119,6 +119,37 @@ bool __init early_is_amd_nb(u32 device)
84 return false;
85 }
86
87 +struct resource *amd_get_mmconfig_range(struct resource *res)
88 +{
89 + u32 address;
90 + u64 base, msr;
91 + unsigned segn_busn_bits;
92 +
93 + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
94 + return NULL;
95 +
96 + /* assume all cpus from fam10h have mmconfig */
97 + if (boot_cpu_data.x86 < 0x10)
98 + return NULL;
99 +
100 + address = MSR_FAM10H_MMIO_CONF_BASE;
101 + rdmsrl(address, msr);
102 +
103 + /* mmconfig is not enabled */
104 + if (!(msr & FAM10H_MMIO_CONF_ENABLE))
105 + return NULL;
106 +
107 + base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
108 +
109 + segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
110 + FAM10H_MMIO_CONF_BUSRANGE_MASK;
111 +
112 + res->flags = IORESOURCE_MEM;
113 + res->start = base;
114 + res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
115 + return res;
116 +}
117 +
118 int amd_get_subcaches(int cpu)
119 {
120 struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
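
The new amd_get_mmconfig_range() helper (reused below by the PCI and PnP hunks) decodes the family-10h MMIO_CONF_BASE MSR into a struct resource. The size arithmetic follows from PCIe extended config space needing 1 MiB per bus (256 device-functions x 4 KiB), so the window spans 2^segn_busn_bits MiB. A user-space sketch with sample values (the real base and bus-range bits come from the MSR):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t base = 0xe0000000ULL;  /* hypothetical MMCONFIG base */
            unsigned segn_busn_bits = 8;    /* hypothetical: window covers 2^8 = 256 buses */

            uint64_t end = base + (1ULL << (segn_busn_bits + 20)) - 1;
            printf("Fam 10h mmconf [0x%llx-0x%llx], %llu MiB\n",
                   (unsigned long long)base, (unsigned long long)end,
                   1ULL << segn_busn_bits);   /* [0xe0000000-0xefffffff], 256 MiB */
            return 0;
    }
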
121 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
122 index cfeb978..874c208 100644
123 --- a/arch/x86/kernel/apic/x2apic_uv_x.c
124 +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
125 @@ -779,7 +779,12 @@ void __init uv_system_init(void)
126 for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
127 uv_possible_blades +=
128 hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
129 - printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
130 +
131 + /* uv_num_possible_blades() is really the hub count */
132 + printk(KERN_INFO "UV: Found %d blades, %d hubs\n",
133 + is_uv1_hub() ? uv_num_possible_blades() :
134 + (uv_num_possible_blades() + 1) / 2,
135 + uv_num_possible_blades());
136
137 bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
138 uv_blade_info = kzalloc(bytes, GFP_KERNEL);
139 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
140 index 1dab519..f927429 100644
141 --- a/arch/x86/mm/mmap.c
142 +++ b/arch/x86/mm/mmap.c
143 @@ -87,9 +87,9 @@ static unsigned long mmap_rnd(void)
144 */
145 if (current->flags & PF_RANDOMIZE) {
146 if (mmap_is_ia32())
147 - rnd = (long)get_random_int() % (1<<8);
148 + rnd = get_random_int() % (1<<8);
149 else
150 - rnd = (long)(get_random_int() % (1<<28));
151 + rnd = get_random_int() % (1<<28);
152 }
153 return rnd << PAGE_SHIFT;
154 }
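
Dropping the (long) casts matters on 32-bit kernels, where long is 32 bits wide: casting the unsigned value returned by get_random_int() to long can yield a negative number, and the following % then produces a negative remainder instead of an offset in 0..255 pages. A minimal user-space illustration (int32_t stands in for a 32-bit long; the sample value is arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t random = 0xdeadbeefu;                 /* stand-in for get_random_int() */

            int32_t  old_rnd = (int32_t)random % (1 << 8); /* old code on 32-bit: may be negative */
            uint32_t new_rnd = random % (1 << 8);          /* fixed code: always 0..255 */

            printf("old: %ld  new: %lu\n",
                   (long)old_rnd, (unsigned long)new_rnd); /* old: -17  new: 239 */
            return 0;
    }
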
155 diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
156 index 81dbfde..7efd0c6 100644
157 --- a/arch/x86/mm/srat.c
158 +++ b/arch/x86/mm/srat.c
159 @@ -104,6 +104,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
160 if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
161 return;
162 pxm = pa->proximity_domain_lo;
163 + if (acpi_srat_revision >= 2)
164 + pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8;
165 node = setup_node(pxm);
166 if (node < 0) {
167 printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
168 @@ -155,6 +157,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
169 start = ma->base_address;
170 end = start + ma->length;
171 pxm = ma->proximity_domain;
172 + if (acpi_srat_revision <= 1)
173 + pxm &= 0xff;
174 node = setup_node(pxm);
175 if (node < 0) {
176 printk(KERN_ERR "SRAT: Too many proximity domains.\n");
177 diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
178 index 6b8759f..d24d3da 100644
179 --- a/arch/x86/pci/Makefile
180 +++ b/arch/x86/pci/Makefile
181 @@ -18,8 +18,9 @@ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
182 obj-$(CONFIG_X86_MRST) += mrst.o
183
184 obj-y += common.o early.o
185 -obj-y += amd_bus.o bus_numa.o
186 +obj-y += bus_numa.o
187
188 +obj-$(CONFIG_AMD_NB) += amd_bus.o
189 obj-$(CONFIG_PCI_CNB20LE_QUIRK) += broadcom_bus.o
190
191 ifeq ($(CONFIG_PCI_DEBUG),y)
192 diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
193 index 50b3f14..53f9e68 100644
194 --- a/arch/x86/pci/acpi.c
195 +++ b/arch/x86/pci/acpi.c
196 @@ -149,7 +149,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
197 struct acpi_resource_address64 addr;
198 acpi_status status;
199 unsigned long flags;
200 - u64 start, end;
201 + u64 start, orig_end, end;
202
203 status = resource_to_addr(acpi_res, &addr);
204 if (!ACPI_SUCCESS(status))
205 @@ -165,7 +165,21 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
206 return AE_OK;
207
208 start = addr.minimum + addr.translation_offset;
209 - end = addr.maximum + addr.translation_offset;
210 + orig_end = end = addr.maximum + addr.translation_offset;
211 +
212 + /* Exclude non-addressable range or non-addressable portion of range */
213 + end = min(end, (u64)iomem_resource.end);
214 + if (end <= start) {
215 + dev_info(&info->bridge->dev,
216 + "host bridge window [%#llx-%#llx] "
217 + "(ignored, not CPU addressable)\n", start, orig_end);
218 + return AE_OK;
219 + } else if (orig_end != end) {
220 + dev_info(&info->bridge->dev,
221 + "host bridge window [%#llx-%#llx] "
222 + "([%#llx-%#llx] ignored, not CPU addressable)\n",
223 + start, orig_end, end + 1, orig_end);
224 + }
225
226 res = &info->res[info->res_num];
227 res->name = info->name;
228 diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
229 index 026e493..385a940 100644
230 --- a/arch/x86/pci/amd_bus.c
231 +++ b/arch/x86/pci/amd_bus.c
232 @@ -30,34 +30,6 @@ static struct pci_hostbridge_probe pci_probes[] __initdata = {
233 { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 },
234 };
235
236 -static u64 __initdata fam10h_mmconf_start;
237 -static u64 __initdata fam10h_mmconf_end;
238 -static void __init get_pci_mmcfg_amd_fam10h_range(void)
239 -{
240 - u32 address;
241 - u64 base, msr;
242 - unsigned segn_busn_bits;
243 -
244 - /* assume all cpus from fam10h have mmconf */
245 - if (boot_cpu_data.x86 < 0x10)
246 - return;
247 -
248 - address = MSR_FAM10H_MMIO_CONF_BASE;
249 - rdmsrl(address, msr);
250 -
251 - /* mmconfig is not enable */
252 - if (!(msr & FAM10H_MMIO_CONF_ENABLE))
253 - return;
254 -
255 - base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
256 -
257 - segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
258 - FAM10H_MMIO_CONF_BUSRANGE_MASK;
259 -
260 - fam10h_mmconf_start = base;
261 - fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
262 -}
263 -
264 #define RANGE_NUM 16
265
266 /**
267 @@ -85,6 +57,9 @@ static int __init early_fill_mp_bus_info(void)
268 u64 val;
269 u32 address;
270 bool found;
271 + struct resource fam10h_mmconf_res, *fam10h_mmconf;
272 + u64 fam10h_mmconf_start;
273 + u64 fam10h_mmconf_end;
274
275 if (!early_pci_allowed())
276 return -1;
277 @@ -211,12 +186,17 @@ static int __init early_fill_mp_bus_info(void)
278 subtract_range(range, RANGE_NUM, 0, end);
279
280 /* get mmconfig */
281 - get_pci_mmcfg_amd_fam10h_range();
282 + fam10h_mmconf = amd_get_mmconfig_range(&fam10h_mmconf_res);
283 /* need to take out mmconf range */
284 - if (fam10h_mmconf_end) {
285 - printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end);
286 + if (fam10h_mmconf) {
287 + printk(KERN_DEBUG "Fam 10h mmconf %pR\n", fam10h_mmconf);
288 + fam10h_mmconf_start = fam10h_mmconf->start;
289 + fam10h_mmconf_end = fam10h_mmconf->end;
290 subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
291 fam10h_mmconf_end + 1);
292 + } else {
293 + fam10h_mmconf_start = 0;
294 + fam10h_mmconf_end = 0;
295 }
296
297 /* mmio resource */
298 diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
299 index 82cff4a..edf435b 100644
300 --- a/arch/x86/platform/uv/tlb_uv.c
301 +++ b/arch/x86/platform/uv/tlb_uv.c
302 @@ -1575,14 +1575,14 @@ static int calculate_destination_timeout(void)
303 ts_ns = base * mult1 * mult2;
304 ret = ts_ns / 1000;
305 } else {
306 - /* 4 bits 0/1 for 10/80us, 3 bits of multiplier */
307 - mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
308 + /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
309 + mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
310 mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
311 if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
312 - mult1 = 80;
313 + base = 80;
314 else
315 - mult1 = 10;
316 - base = mmr_image & UV2_ACK_MASK;
317 + base = 10;
318 + mult1 = mmr_image & UV2_ACK_MASK;
319 ret = mult1 * base;
320 }
321 return ret;
322 @@ -1820,6 +1820,8 @@ static int __init uv_bau_init(void)
323 uv_base_pnode = uv_blade_to_pnode(uvhub);
324 }
325
326 + enable_timeouts();
327 +
328 if (init_per_cpu(nuvhubs, uv_base_pnode)) {
329 nobau = 1;
330 return 0;
331 @@ -1830,7 +1832,6 @@ static int __init uv_bau_init(void)
332 if (uv_blade_nr_possible_cpus(uvhub))
333 init_uvhub(uvhub, vector, uv_base_pnode);
334
335 - enable_timeouts();
336 alloc_intr_gate(vector, uv_bau_message_intr1);
337
338 for_each_possible_blade(uvhub) {
339 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
340 index 4f4230b..5ef1f4c 100644
341 --- a/block/scsi_ioctl.c
342 +++ b/block/scsi_ioctl.c
343 @@ -24,6 +24,7 @@
344 #include <linux/capability.h>
345 #include <linux/completion.h>
346 #include <linux/cdrom.h>
347 +#include <linux/ratelimit.h>
348 #include <linux/slab.h>
349 #include <linux/times.h>
350 #include <asm/uaccess.h>
351 @@ -691,6 +692,57 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
352 }
353 EXPORT_SYMBOL(scsi_cmd_ioctl);
354
355 +int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
356 +{
357 + if (bd && bd == bd->bd_contains)
358 + return 0;
359 +
360 + /* Actually none of these is particularly useful on a partition,
361 + * but they are safe.
362 + */
363 + switch (cmd) {
364 + case SCSI_IOCTL_GET_IDLUN:
365 + case SCSI_IOCTL_GET_BUS_NUMBER:
366 + case SCSI_IOCTL_GET_PCI:
367 + case SCSI_IOCTL_PROBE_HOST:
368 + case SG_GET_VERSION_NUM:
369 + case SG_SET_TIMEOUT:
370 + case SG_GET_TIMEOUT:
371 + case SG_GET_RESERVED_SIZE:
372 + case SG_SET_RESERVED_SIZE:
373 + case SG_EMULATED_HOST:
374 + return 0;
375 + case CDROM_GET_CAPABILITY:
376 + /* Keep this until we remove the printk below. udev sends it
377 + * and we do not want to spam dmesg about it. CD-ROMs do
378 + * not have partitions, so we get here only for disks.
379 + */
380 + return -ENOTTY;
381 + default:
382 + break;
383 + }
384 +
385 + /* In particular, rule out all resets and host-specific ioctls. */
386 + printk_ratelimited(KERN_WARNING
387 + "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
388 +
389 + return capable(CAP_SYS_RAWIO) ? 0 : -ENOTTY;
390 +}
391 +EXPORT_SYMBOL(scsi_verify_blk_ioctl);
392 +
393 +int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
394 + unsigned int cmd, void __user *arg)
395 +{
396 + int ret;
397 +
398 + ret = scsi_verify_blk_ioctl(bd, cmd);
399 + if (ret < 0)
400 + return ret;
401 +
402 + return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
403 +}
404 +EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
405 +
406 static int __init blk_scsi_ioctl_init(void)
407 {
408 blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
409 diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
410 index 8c7b997..42163d8 100644
411 --- a/drivers/acpi/acpica/dsargs.c
412 +++ b/drivers/acpi/acpica/dsargs.c
413 @@ -387,5 +387,29 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
414 status = acpi_ds_execute_arguments(node, node->parent,
415 extra_desc->extra.aml_length,
416 extra_desc->extra.aml_start);
417 + if (ACPI_FAILURE(status)) {
418 + return_ACPI_STATUS(status);
419 + }
420 +
421 + /* Validate the region address/length via the host OS */
422 +
423 + status = acpi_os_validate_address(obj_desc->region.space_id,
424 + obj_desc->region.address,
425 + (acpi_size) obj_desc->region.length,
426 + acpi_ut_get_node_name(node));
427 +
428 + if (ACPI_FAILURE(status)) {
429 + /*
430 + * Invalid address/length. We will emit an error message and mark
431 + * the region as invalid, so that it will cause an additional error if
432 + * it is ever used. Then return AE_OK.
433 + */
434 + ACPI_EXCEPTION((AE_INFO, status,
435 + "During address validation of OpRegion [%4.4s]",
436 + node->name.ascii));
437 + obj_desc->common.flags |= AOPOBJ_INVALID;
438 + status = AE_OK;
439 + }
440 +
441 return_ACPI_STATUS(status);
442 }
443 diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
444 index 3b5c318..e56f3be 100644
445 --- a/drivers/acpi/numa.c
446 +++ b/drivers/acpi/numa.c
447 @@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS]
448 static int node_to_pxm_map[MAX_NUMNODES]
449 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
450
451 +unsigned char acpi_srat_revision __initdata;
452 +
453 int pxm_to_node(int pxm)
454 {
455 if (pxm < 0)
456 @@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
457
458 static int __init acpi_parse_srat(struct acpi_table_header *table)
459 {
460 + struct acpi_table_srat *srat;
461 if (!table)
462 return -EINVAL;
463
464 + srat = (struct acpi_table_srat *)table;
465 + acpi_srat_revision = srat->header.revision;
466 +
467 /* Real work done in acpi_table_parse_srat below. */
468
469 return 0;
470 diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
471 index 02d2a4c..0c0669f 100644
472 --- a/drivers/acpi/processor_core.c
473 +++ b/drivers/acpi/processor_core.c
474 @@ -172,8 +172,30 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
475 apic_id = map_mat_entry(handle, type, acpi_id);
476 if (apic_id == -1)
477 apic_id = map_madt_entry(type, acpi_id);
478 - if (apic_id == -1)
479 - return apic_id;
480 + if (apic_id == -1) {
481 + /*
482 + * On UP processor, there is no _MAT or MADT table.
483 + * So above apic_id is always set to -1.
484 + *
485 + * BIOS may define multiple CPU handles even for UP processor.
486 + * For example,
487 + *
488 + * Scope (_PR)
489 + * {
490 + * Processor (CPU0, 0x00, 0x00000410, 0x06) {}
491 + * Processor (CPU1, 0x01, 0x00000410, 0x06) {}
492 + * Processor (CPU2, 0x02, 0x00000410, 0x06) {}
493 + * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
494 + * }
495 + *
496 + * Ignores apic_id and always return 0 for CPU0's handle.
497 + * Return -1 for other CPU's handle.
498 + */
499 + if (acpi_id == 0)
500 + return acpi_id;
501 + else
502 + return apic_id;
503 + }
504
505 #ifdef CONFIG_SMP
506 for_each_possible_cpu(i) {
507 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
508 index c2f9b3e..1dab802 100644
509 --- a/drivers/block/cciss.c
510 +++ b/drivers/block/cciss.c
511 @@ -1716,7 +1716,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
512 case CCISS_BIG_PASSTHRU:
513 return cciss_bigpassthru(h, argp);
514
515 - /* scsi_cmd_ioctl handles these, below, though some are not */
516 + /* scsi_cmd_blk_ioctl handles these, below, though some are not */
517 /* very meaningful for cciss. SG_IO is the main one people want. */
518
519 case SG_GET_VERSION_NUM:
520 @@ -1727,9 +1727,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
521 case SG_EMULATED_HOST:
522 case SG_IO:
523 case SCSI_IOCTL_SEND_COMMAND:
524 - return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
525 + return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
526
527 - /* scsi_cmd_ioctl would normally handle these, below, but */
528 + /* scsi_cmd_blk_ioctl would normally handle these, below, but */
529 /* they aren't a good fit for cciss, as CD-ROMs are */
530 /* not supported, and we don't have any bus/target/lun */
531 /* which we present to the kernel. */
532 diff --git a/drivers/block/ub.c b/drivers/block/ub.c
533 index 0e376d4..7333b9e 100644
534 --- a/drivers/block/ub.c
535 +++ b/drivers/block/ub.c
536 @@ -1744,12 +1744,11 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode)
537 static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
538 unsigned int cmd, unsigned long arg)
539 {
540 - struct gendisk *disk = bdev->bd_disk;
541 void __user *usermem = (void __user *) arg;
542 int ret;
543
544 mutex_lock(&ub_mutex);
545 - ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
546 + ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, usermem);
547 mutex_unlock(&ub_mutex);
548
549 return ret;
550 diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
551 index 079c088..5d7a934 100644
552 --- a/drivers/block/virtio_blk.c
553 +++ b/drivers/block/virtio_blk.c
554 @@ -236,8 +236,8 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
555 if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
556 return -ENOTTY;
557
558 - return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
559 - (void __user *)data);
560 + return scsi_cmd_blk_ioctl(bdev, mode, cmd,
561 + (void __user *)data);
562 }
563
564 /* We provide getgeo only to please some old bootloader/partitioning tools */
565 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
566 index 75fb965..b693cbd 100644
567 --- a/drivers/cdrom/cdrom.c
568 +++ b/drivers/cdrom/cdrom.c
569 @@ -2741,12 +2741,11 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
570 {
571 void __user *argp = (void __user *)arg;
572 int ret;
573 - struct gendisk *disk = bdev->bd_disk;
574
575 /*
576 * Try the generic SCSI command ioctl's first.
577 */
578 - ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
579 + ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
580 if (ret != -ENOTTY)
581 return ret;
582
583 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
584 index b94d871..7642495 100644
585 --- a/drivers/gpu/drm/radeon/r100.c
586 +++ b/drivers/gpu/drm/radeon/r100.c
587 @@ -2069,6 +2069,7 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev)
588 void r100_bm_disable(struct radeon_device *rdev)
589 {
590 u32 tmp;
591 + u16 tmp16;
592
593 /* disable bus mastering */
594 tmp = RREG32(R_000030_BUS_CNTL);
595 @@ -2079,8 +2080,8 @@ void r100_bm_disable(struct radeon_device *rdev)
596 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
597 tmp = RREG32(RADEON_BUS_CNTL);
598 mdelay(1);
599 - pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
600 - pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
601 + pci_read_config_word(rdev->pdev, 0x4, &tmp16);
602 + pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB);
603 mdelay(1);
604 }
605
606 diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
607 index f5ac7e7..c45d921 100644
608 --- a/drivers/gpu/drm/radeon/r600_hdmi.c
609 +++ b/drivers/gpu/drm/radeon/r600_hdmi.c
610 @@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe(
611 frame[0xD] = (right_bar >> 8);
612
613 r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
614 + /* Our header values (type, version, length) should be alright, Intel
615 + * is using the same. Checksum function also seems to be OK, it works
616 + * fine for audio infoframe. However calculated value is always lower
617 + * by 2 in comparison to fglrx. It breaks displaying anything in case
618 + * of TVs that strictly check the checksum. Hack it manually here to
619 + * workaround this issue. */
620 + frame[0x0] += 2;
621
622 WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
623 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
624 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
625 index 440e6ec..5d0c123 100644
626 --- a/drivers/gpu/drm/radeon/radeon_device.c
627 +++ b/drivers/gpu/drm/radeon/radeon_device.c
628 @@ -223,8 +223,11 @@ int radeon_wb_init(struct radeon_device *rdev)
629 if (radeon_no_wb == 1)
630 rdev->wb.enabled = false;
631 else {
632 - /* often unreliable on AGP */
633 if (rdev->flags & RADEON_IS_AGP) {
634 + /* often unreliable on AGP */
635 + rdev->wb.enabled = false;
636 + } else if (rdev->family < CHIP_R300) {
637 + /* often unreliable on pre-r300 */
638 rdev->wb.enabled = false;
639 } else {
640 rdev->wb.enabled = true;
641 diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
642 index a37a1ef..21acfb5 100644
643 --- a/drivers/gpu/drm/radeon/rs600.c
644 +++ b/drivers/gpu/drm/radeon/rs600.c
645 @@ -324,10 +324,10 @@ void rs600_hpd_fini(struct radeon_device *rdev)
646
647 void rs600_bm_disable(struct radeon_device *rdev)
648 {
649 - u32 tmp;
650 + u16 tmp;
651
652 /* disable bus mastering */
653 - pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
654 + pci_read_config_word(rdev->pdev, 0x4, &tmp);
655 pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
656 mdelay(1);
657 }
658 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
659 index 763797d..2f855b1 100644
660 --- a/drivers/hid/hid-core.c
661 +++ b/drivers/hid/hid-core.c
662 @@ -361,7 +361,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
663
664 case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
665 parser->global.report_size = item_udata(item);
666 - if (parser->global.report_size > 32) {
667 + if (parser->global.report_size > 96) {
668 dbg_hid("invalid report_size %d\n",
669 parser->global.report_size);
670 return -1;
671 @@ -1382,11 +1382,13 @@ static const struct hid_device_id hid_have_special_driver[] = {
672 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
673 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
674 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
675 - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
676 - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
677 - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
678 - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
679 - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
680 + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
681 + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
682 + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
683 + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
684 + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
685 + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
686 + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
687 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
688 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2515) },
689 { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
690 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
691 index 206f750..e0a28ad 100644
692 --- a/drivers/hid/hid-ids.h
693 +++ b/drivers/hid/hid-ids.h
694 @@ -21,6 +21,7 @@
695 #define USB_VENDOR_ID_3M 0x0596
696 #define USB_DEVICE_ID_3M1968 0x0500
697 #define USB_DEVICE_ID_3M2256 0x0502
698 +#define USB_DEVICE_ID_3M3266 0x0506
699
700 #define USB_VENDOR_ID_A4TECH 0x09da
701 #define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
702 @@ -230,11 +231,14 @@
703
704 #define USB_VENDOR_ID_DWAV 0x0eef
705 #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
706 -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
707 -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1 0x720c
708 -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2 0x72a1
709 -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3 0x480e
710 -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4 0x726b
711 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D 0x480d
712 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E 0x480e
713 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C 0x720c
714 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B 0x726b
715 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1 0x72a1
716 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA 0x72fa
717 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302 0x7302
718 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
719
720 #define USB_VENDOR_ID_ELECOM 0x056e
721 #define USB_DEVICE_ID_ELECOM_BM084 0x0061
722 diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
723 index 685d8e4..1308703 100644
724 --- a/drivers/hid/hid-multitouch.c
725 +++ b/drivers/hid/hid-multitouch.c
726 @@ -593,6 +593,9 @@ static const struct hid_device_id mt_devices[] = {
727 { .driver_data = MT_CLS_3M,
728 HID_USB_DEVICE(USB_VENDOR_ID_3M,
729 USB_DEVICE_ID_3M2256) },
730 + { .driver_data = MT_CLS_3M,
731 + HID_USB_DEVICE(USB_VENDOR_ID_3M,
732 + USB_DEVICE_ID_3M3266) },
733
734 /* ActionStar panels */
735 { .driver_data = MT_CLS_DEFAULT,
736 @@ -629,23 +632,32 @@ static const struct hid_device_id mt_devices[] = {
737 USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
738
739 /* eGalax devices (resistive) */
740 - { .driver_data = MT_CLS_EGALAX,
741 + { .driver_data = MT_CLS_EGALAX,
742 HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
743 - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
744 - { .driver_data = MT_CLS_EGALAX,
745 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
746 + { .driver_data = MT_CLS_EGALAX,
747 HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
748 - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
749 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
750
751 /* eGalax devices (capacitive) */
752 - { .driver_data = MT_CLS_EGALAX,
753 + { .driver_data = MT_CLS_EGALAX,
754 + HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
755 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
756 + { .driver_data = MT_CLS_EGALAX,
757 + HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
758 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
759 + { .driver_data = MT_CLS_EGALAX,
760 + HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
761 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
762 + { .driver_data = MT_CLS_EGALAX,
763 HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
764 - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
765 - { .driver_data = MT_CLS_EGALAX,
766 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA) },
767 + { .driver_data = MT_CLS_EGALAX,
768 HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
769 - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
770 - { .driver_data = MT_CLS_EGALAX,
771 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
772 + { .driver_data = MT_CLS_EGALAX,
773 HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
774 - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
775 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
776
777 /* Elo TouchSystems IntelliTouch Plus panel */
778 { .driver_data = MT_CLS_DUAL_NSMU_CONTACTID,
779 diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
780 index dd36417..cd7ac5c 100644
781 --- a/drivers/i2c/busses/i2c-ali1535.c
782 +++ b/drivers/i2c/busses/i2c-ali1535.c
783 @@ -140,7 +140,7 @@ static unsigned short ali1535_smba;
784 defined to make the transition easier. */
785 static int __devinit ali1535_setup(struct pci_dev *dev)
786 {
787 - int retval = -ENODEV;
788 + int retval;
789 unsigned char temp;
790
791 /* Check the following things:
792 @@ -155,6 +155,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
793 if (ali1535_smba == 0) {
794 dev_warn(&dev->dev,
795 "ALI1535_smb region uninitialized - upgrade BIOS?\n");
796 + retval = -ENODEV;
797 goto exit;
798 }
799
800 @@ -167,6 +168,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
801 ali1535_driver.name)) {
802 dev_err(&dev->dev, "ALI1535_smb region 0x%x already in use!\n",
803 ali1535_smba);
804 + retval = -EBUSY;
805 goto exit;
806 }
807
808 @@ -174,6 +176,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
809 pci_read_config_byte(dev, SMBCFG, &temp);
810 if ((temp & ALI1535_SMBIO_EN) == 0) {
811 dev_err(&dev->dev, "SMB device not enabled - upgrade BIOS?\n");
812 + retval = -ENODEV;
813 goto exit_free;
814 }
815
816 @@ -181,6 +184,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
817 pci_read_config_byte(dev, SMBHSTCFG, &temp);
818 if ((temp & 1) == 0) {
819 dev_err(&dev->dev, "SMBus controller not enabled - upgrade BIOS?\n");
820 + retval = -ENODEV;
821 goto exit_free;
822 }
823
824 @@ -198,12 +202,11 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
825 dev_dbg(&dev->dev, "SMBREV = 0x%X\n", temp);
826 dev_dbg(&dev->dev, "ALI1535_smba = 0x%X\n", ali1535_smba);
827
828 - retval = 0;
829 -exit:
830 - return retval;
831 + return 0;
832
833 exit_free:
834 release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
835 +exit:
836 return retval;
837 }
838
839 diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
840 index 8abfa4a..656b028 100644
841 --- a/drivers/i2c/busses/i2c-eg20t.c
842 +++ b/drivers/i2c/busses/i2c-eg20t.c
843 @@ -242,7 +242,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
844 if (pch_clk > PCH_MAX_CLK)
845 pch_clk = 62500;
846
847 - pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
848 + pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
849 /* Set transfer speed in I2CBC */
850 iowrite32(pch_i2cbc, p + PCH_I2CBC);
851
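
The i2c-eg20t change is a pure operator-precedence fix: without the added parentheses the expression divides by pch_i2c_speed and then multiplies by 8, so the programmed I2CBC divider came out roughly 64 times too large. A quick illustration with arbitrary sample numbers (not the driver's real units):

    #include <stdio.h>

    int main(void)
    {
            unsigned pch_clk = 62500;      /* sample clock (the value the driver clamps to) */
            unsigned pch_i2c_speed = 100;  /* arbitrary sample speed */

            unsigned old_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
            unsigned new_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);

            printf("old: %u  new: %u\n", old_i2cbc, new_i2cbc);  /* old: 5032  new: 78 */
            return 0;
    }
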
852 diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
853 index ff1e127..4853b52 100644
854 --- a/drivers/i2c/busses/i2c-nforce2.c
855 +++ b/drivers/i2c/busses/i2c-nforce2.c
856 @@ -356,7 +356,7 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
857 error = acpi_check_region(smbus->base, smbus->size,
858 nforce2_driver.name);
859 if (error)
860 - return -1;
861 + return error;
862
863 if (!request_region(smbus->base, smbus->size, nforce2_driver.name)) {
864 dev_err(&smbus->adapter.dev, "Error requesting region %02x .. %02X for %s\n",
865 diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
866 index 58a58c7..137e1a3 100644
867 --- a/drivers/i2c/busses/i2c-omap.c
868 +++ b/drivers/i2c/busses/i2c-omap.c
869 @@ -235,7 +235,7 @@ const static u8 omap4_reg_map[] = {
870 [OMAP_I2C_BUF_REG] = 0x94,
871 [OMAP_I2C_CNT_REG] = 0x98,
872 [OMAP_I2C_DATA_REG] = 0x9c,
873 - [OMAP_I2C_SYSC_REG] = 0x20,
874 + [OMAP_I2C_SYSC_REG] = 0x10,
875 [OMAP_I2C_CON_REG] = 0xa4,
876 [OMAP_I2C_OA_REG] = 0xa8,
877 [OMAP_I2C_SA_REG] = 0xac,
878 diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
879 index 4375866..6d60284 100644
880 --- a/drivers/i2c/busses/i2c-sis5595.c
881 +++ b/drivers/i2c/busses/i2c-sis5595.c
882 @@ -147,7 +147,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
883 u16 a;
884 u8 val;
885 int *i;
886 - int retval = -ENODEV;
887 + int retval;
888
889 /* Look for imposters */
890 for (i = blacklist; *i != 0; i++) {
891 @@ -223,7 +223,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
892
893 error:
894 release_region(sis5595_base + SMB_INDEX, 2);
895 - return retval;
896 + return -ENODEV;
897 }
898
899 static int sis5595_transaction(struct i2c_adapter *adap)
900 diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
901 index e6f539e..b617fd0 100644
902 --- a/drivers/i2c/busses/i2c-sis630.c
903 +++ b/drivers/i2c/busses/i2c-sis630.c
904 @@ -393,7 +393,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
905 {
906 unsigned char b;
907 struct pci_dev *dummy = NULL;
908 - int retval = -ENODEV, i;
909 + int retval, i;
910
911 /* check for supported SiS devices */
912 for (i=0; supported[i] > 0 ; i++) {
913 @@ -418,18 +418,21 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
914 */
915 if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG,&b)) {
916 dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n");
917 + retval = -ENODEV;
918 goto exit;
919 }
920 /* if ACPI already enabled , do nothing */
921 if (!(b & 0x80) &&
922 pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) {
923 dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n");
924 + retval = -ENODEV;
925 goto exit;
926 }
927
928 /* Determine the ACPI base address */
929 if (pci_read_config_word(sis630_dev,SIS630_ACPI_BASE_REG,&acpi_base)) {
930 dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n");
931 + retval = -ENODEV;
932 goto exit;
933 }
934
935 @@ -445,6 +448,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
936 sis630_driver.name)) {
937 dev_err(&sis630_dev->dev, "SMBus registers 0x%04x-0x%04x already "
938 "in use!\n", acpi_base + SMB_STS, acpi_base + SMB_SAA);
939 + retval = -EBUSY;
940 goto exit;
941 }
942
943 diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
944 index 0b012f1..58261d4 100644
945 --- a/drivers/i2c/busses/i2c-viapro.c
946 +++ b/drivers/i2c/busses/i2c-viapro.c
947 @@ -324,7 +324,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev,
948 const struct pci_device_id *id)
949 {
950 unsigned char temp;
951 - int error = -ENODEV;
952 + int error;
953
954 /* Determine the address of the SMBus areas */
955 if (force_addr) {
956 @@ -390,6 +390,7 @@ found:
957 dev_err(&pdev->dev, "SMBUS: Error: Host SMBus "
958 "controller not enabled! - upgrade BIOS or "
959 "use force=1\n");
960 + error = -ENODEV;
961 goto release_region;
962 }
963 }
964 @@ -422,9 +423,11 @@ found:
965 "SMBus Via Pro adapter at %04x", vt596_smba);
966
967 vt596_pdev = pci_dev_get(pdev);
968 - if (i2c_add_adapter(&vt596_adapter)) {
969 + error = i2c_add_adapter(&vt596_adapter);
970 + if (error) {
971 pci_dev_put(vt596_pdev);
972 vt596_pdev = NULL;
973 + goto release_region;
974 }
975
976 /* Always return failure here. This is to allow other drivers to bind
977 diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
978 index d267b7a..a22ca84 100644
979 --- a/drivers/ide/ide-floppy_ioctl.c
980 +++ b/drivers/ide/ide-floppy_ioctl.c
981 @@ -292,8 +292,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
982 * and CDROM_SEND_PACKET (legacy) ioctls
983 */
984 if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
985 - err = scsi_cmd_ioctl(bdev->bd_disk->queue, bdev->bd_disk,
986 - mode, cmd, argp);
987 + err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
988
989 if (err == -ENOTTY)
990 err = generic_ide_ioctl(drive, bdev, cmd, arg);
991 diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
992 index a46dddf..026f9aa 100644
993 --- a/drivers/idle/intel_idle.c
994 +++ b/drivers/idle/intel_idle.c
995 @@ -321,7 +321,8 @@ static int intel_idle_probe(void)
996 cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
997
998 if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
999 - !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
1000 + !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
1001 + !mwait_substates)
1002 return -ENODEV;
1003
1004 pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
1005 @@ -367,7 +368,7 @@ static int intel_idle_probe(void)
1006 if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
1007 lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
1008 else {
1009 - smp_call_function(__setup_broadcast_timer, (void *)true, 1);
1010 + on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
1011 register_cpu_notifier(&setup_broadcast_notifier);
1012 }
1013
1014 @@ -459,7 +460,7 @@ static int intel_idle_cpuidle_devices_init(void)
1015 }
1016 }
1017 if (auto_demotion_disable_flags)
1018 - smp_call_function(auto_demotion_disable, NULL, 1);
1019 + on_each_cpu(auto_demotion_disable, NULL, 1);
1020
1021 return 0;
1022 }
1023 @@ -499,7 +500,7 @@ static void __exit intel_idle_exit(void)
1024 cpuidle_unregister_driver(&intel_idle_driver);
1025
1026 if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
1027 - smp_call_function(__setup_broadcast_timer, (void *)false, 1);
1028 + on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
1029 unregister_cpu_notifier(&setup_broadcast_notifier);
1030 }
1031
1032 diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
1033 index ea79062..3e90b80 100644
1034 --- a/drivers/md/dm-flakey.c
1035 +++ b/drivers/md/dm-flakey.c
1036 @@ -149,8 +149,17 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
1037 static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
1038 {
1039 struct flakey_c *fc = ti->private;
1040 + struct dm_dev *dev = fc->dev;
1041 + int r = 0;
1042
1043 - return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg);
1044 + /*
1045 + * Only pass ioctls through if the device sizes match exactly.
1046 + */
1047 + if (fc->start ||
1048 + ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
1049 + r = scsi_verify_blk_ioctl(NULL, cmd);
1050 +
1051 + return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
1052 }
1053
1054 static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
1055 diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
1056 index 3921e3b..9728839 100644
1057 --- a/drivers/md/dm-linear.c
1058 +++ b/drivers/md/dm-linear.c
1059 @@ -116,7 +116,17 @@ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
1060 unsigned long arg)
1061 {
1062 struct linear_c *lc = (struct linear_c *) ti->private;
1063 - return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg);
1064 + struct dm_dev *dev = lc->dev;
1065 + int r = 0;
1066 +
1067 + /*
1068 + * Only pass ioctls through if the device sizes match exactly.
1069 + */
1070 + if (lc->start ||
1071 + ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
1072 + r = scsi_verify_blk_ioctl(NULL, cmd);
1073 +
1074 + return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
1075 }
1076
1077 static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
1078 diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
1079 index 209991b..70373bf 100644
1080 --- a/drivers/md/dm-mpath.c
1081 +++ b/drivers/md/dm-mpath.c
1082 @@ -1584,6 +1584,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
1083
1084 spin_unlock_irqrestore(&m->lock, flags);
1085
1086 + /*
1087 + * Only pass ioctls through if the device sizes match exactly.
1088 + */
1089 + if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
1090 + r = scsi_verify_blk_ioctl(NULL, cmd);
1091 +
1092 return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
1093 }
1094
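
The three device-mapper hunks above (dm-flakey, dm-linear, dm-mpath) add the same guard: an ioctl is only forwarded unchecked when the target maps the entire underlying device, i.e. it starts at offset 0 and its length in 512-byte sectors equals the device size shifted down by SECTOR_SHIFT; otherwise scsi_verify_blk_ioctl() filters the command as if it were aimed at a partition. A user-space sketch of the size comparison with made-up numbers:

    #include <stdio.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9

    int main(void)
    {
            uint64_t ti_start = 0;                 /* target offset in sectors (sample) */
            uint64_t ti_len = 2097152;             /* target length in sectors: 1 GiB   */
            uint64_t bdev_bytes = 1073741824ULL;   /* underlying device size: 1 GiB     */

            int whole_device = !ti_start && ti_len == (bdev_bytes >> SECTOR_SHIFT);
            printf("forward ioctl without partition check: %s\n",
                   whole_device ? "yes" : "no");
            return 0;
    }
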
1095 diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
1096 index 543a803..dbefdb0 100644
1097 --- a/drivers/media/video/uvc/uvc_v4l2.c
1098 +++ b/drivers/media/video/uvc/uvc_v4l2.c
1099 @@ -65,6 +65,15 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
1100 goto done;
1101 }
1102
1103 + /* Prevent excessive memory consumption, as well as integer
1104 + * overflows.
1105 + */
1106 + if (xmap->menu_count == 0 ||
1107 + xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES) {
1108 + ret = -EINVAL;
1109 + goto done;
1110 + }
1111 +
1112 size = xmap->menu_count * sizeof(*map->menu_info);
1113 map->menu_info = kmalloc(size, GFP_KERNEL);
1114 if (map->menu_info == NULL) {
1115 diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
1116 index 2a38d5e..cf2401a 100644
1117 --- a/drivers/media/video/uvc/uvcvideo.h
1118 +++ b/drivers/media/video/uvc/uvcvideo.h
1119 @@ -200,6 +200,7 @@ struct uvc_xu_control {
1120
1121 /* Maximum allowed number of control mappings per device */
1122 #define UVC_MAX_CONTROL_MAPPINGS 1024
1123 +#define UVC_MAX_CONTROL_MENU_ENTRIES 32
1124
1125 /* Devices quirks */
1126 #define UVC_QUIRK_STATUS_INTERVAL 0x00000001
1127 diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
1128 index 69e8c6f..bda252f 100644
1129 --- a/drivers/media/video/v4l2-ioctl.c
1130 +++ b/drivers/media/video/v4l2-ioctl.c
1131 @@ -2289,6 +2289,10 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
1132 struct v4l2_ext_controls *ctrls = parg;
1133
1134 if (ctrls->count != 0) {
1135 + if (ctrls->count > V4L2_CID_MAX_CTRLS) {
1136 + ret = -EINVAL;
1137 + break;
1138 + }
1139 *user_ptr = (void __user *)ctrls->controls;
1140 *kernel_ptr = (void **)&ctrls->controls;
1141 *array_size = sizeof(struct v4l2_ext_control)
1142 diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
1143 index 20b42c8..f601180 100644
1144 --- a/drivers/mmc/core/mmc.c
1145 +++ b/drivers/mmc/core/mmc.c
1146 @@ -830,7 +830,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1147 *
1148 * WARNING: eMMC rules are NOT the same as SD DDR
1149 */
1150 - if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
1151 + if (ddr == MMC_1_2V_DDR_MODE) {
1152 err = mmc_set_signal_voltage(host,
1153 MMC_SIGNAL_VOLTAGE_120, 0);
1154 if (err)
1155 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
1156 index 6d3de08..153008f 100644
1157 --- a/drivers/mmc/host/sdhci.c
1158 +++ b/drivers/mmc/host/sdhci.c
1159 @@ -1340,8 +1340,7 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1160 if ((ios->timing == MMC_TIMING_UHS_SDR50) ||
1161 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1162 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1163 - (ios->timing == MMC_TIMING_UHS_SDR25) ||
1164 - (ios->timing == MMC_TIMING_UHS_SDR12))
1165 + (ios->timing == MMC_TIMING_UHS_SDR25))
1166 ctrl |= SDHCI_CTRL_HISPD;
1167
1168 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1169 @@ -2227,9 +2226,8 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
1170 /* Disable tuning since we are suspending */
1171 if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
1172 host->tuning_mode == SDHCI_TUNING_MODE_1) {
1173 + del_timer_sync(&host->tuning_timer);
1174 host->flags &= ~SDHCI_NEEDS_RETUNING;
1175 - mod_timer(&host->tuning_timer, jiffies +
1176 - host->tuning_count * HZ);
1177 }
1178
1179 ret = mmc_suspend_host(host->mmc);
1180 diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
1181 index ca38569..bff8d46 100644
1182 --- a/drivers/mtd/mtd_blkdevs.c
1183 +++ b/drivers/mtd/mtd_blkdevs.c
1184 @@ -215,7 +215,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
1185
1186 mutex_lock(&dev->lock);
1187
1188 - if (dev->open++)
1189 + if (dev->open)
1190 goto unlock;
1191
1192 kref_get(&dev->ref);
1193 @@ -235,6 +235,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
1194 goto error_release;
1195
1196 unlock:
1197 + dev->open++;
1198 mutex_unlock(&dev->lock);
1199 blktrans_dev_put(dev);
1200 return ret;
1201 diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
1202 index e3e40f4..43130e8 100644
1203 --- a/drivers/mtd/mtdoops.c
1204 +++ b/drivers/mtd/mtdoops.c
1205 @@ -253,6 +253,9 @@ static void find_next_position(struct mtdoops_context *cxt)
1206 size_t retlen;
1207
1208 for (page = 0; page < cxt->oops_pages; page++) {
1209 + if (mtd->block_isbad &&
1210 + mtd->block_isbad(mtd, page * record_size))
1211 + continue;
1212 /* Assume the page is used */
1213 mark_page_used(cxt, page);
1214 ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
1215 @@ -369,7 +372,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
1216
1217 /* oops_page_used is a bit field */
1218 cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
1219 - BITS_PER_LONG));
1220 + BITS_PER_LONG) * sizeof(unsigned long));
1221 if (!cxt->oops_page_used) {
1222 printk(KERN_ERR "mtdoops: could not allocate page array\n");
1223 return;
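
The mtdoops allocation fix is a units bug: oops_page_used is a bitmap, so it needs DIV_ROUND_UP(pages, BITS_PER_LONG) longs, but the old code passed that count of longs to vmalloc() as a byte count. A user-space sketch of the difference (sample page count; on a 64-bit build the old size is 8x too small):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long pages = 100000;  /* arbitrary sample */
            unsigned long longs = DIV_ROUND_UP(pages, BITS_PER_LONG);

            printf("old allocation: %lu bytes (too small)\n", longs);
            printf("fixed:          %lu bytes\n",
                   longs * (unsigned long)sizeof(unsigned long));
            return 0;
    }
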
1224 diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
1225 index 531625f..129bad2 100644
1226 --- a/drivers/mtd/tests/mtd_stresstest.c
1227 +++ b/drivers/mtd/tests/mtd_stresstest.c
1228 @@ -277,6 +277,12 @@ static int __init mtd_stresstest_init(void)
1229 (unsigned long long)mtd->size, mtd->erasesize,
1230 pgsize, ebcnt, pgcnt, mtd->oobsize);
1231
1232 + if (ebcnt < 2) {
1233 + printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
1234 + err = -ENOSPC;
1235 + goto out_put_mtd;
1236 + }
1237 +
1238 /* Read or write up 2 eraseblocks at a time */
1239 bufsize = mtd->erasesize * 2;
1240
1241 @@ -315,6 +321,7 @@ out:
1242 kfree(bbt);
1243 vfree(writebuf);
1244 vfree(readbuf);
1245 +out_put_mtd:
1246 put_mtd_device(mtd);
1247 if (err)
1248 printk(PRINT_PREF "error %d occurred\n", err);
1249 diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
1250 index 191f3bb..cdea669 100644
1251 --- a/drivers/mtd/ubi/cdev.c
1252 +++ b/drivers/mtd/ubi/cdev.c
1253 @@ -628,6 +628,9 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
1254 if (req->alignment != 1 && n)
1255 goto bad;
1256
1257 + if (!req->name[0] || !req->name_len)
1258 + goto bad;
1259 +
1260 if (req->name_len > UBI_VOL_NAME_MAX) {
1261 err = -ENAMETOOLONG;
1262 goto bad;
1263 diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
1264 index 3f1a09c..5f0e4c2 100644
1265 --- a/drivers/mtd/ubi/debug.h
1266 +++ b/drivers/mtd/ubi/debug.h
1267 @@ -51,7 +51,10 @@ struct ubi_mkvol_req;
1268 pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
1269
1270 /* Just a debugging messages not related to any specific UBI subsystem */
1271 -#define dbg_msg(fmt, ...) ubi_dbg_msg("msg", fmt, ##__VA_ARGS__)
1272 +#define dbg_msg(fmt, ...) \
1273 + printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
1274 + current->pid, __func__, ##__VA_ARGS__)
1275 +
1276 /* General debugging messages */
1277 #define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
1278 /* Messages from the eraseblock association sub-system */
1279 diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
1280 index 4be6718..c696c94 100644
1281 --- a/drivers/mtd/ubi/eba.c
1282 +++ b/drivers/mtd/ubi/eba.c
1283 @@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1284 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
1285 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
1286 * LEB is already locked, we just do not move it and return
1287 - * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
1288 + * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
1289 + * we do not know the reasons of the contention - it may be just a
1290 + * normal I/O on this LEB, so we want to re-try.
1291 */
1292 err = leb_write_trylock(ubi, vol_id, lnum);
1293 if (err) {
1294 dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
1295 - return MOVE_CANCEL_RACE;
1296 + return MOVE_RETRY;
1297 }
1298
1299 /*
1300 diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
1301 index c6c2229..bbfa88d 100644
1302 --- a/drivers/mtd/ubi/ubi.h
1303 +++ b/drivers/mtd/ubi/ubi.h
1304 @@ -121,6 +121,7 @@ enum {
1305 * PEB
1306 * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
1307 * target PEB
1308 + * MOVE_RETRY: retry scrubbing the PEB
1309 */
1310 enum {
1311 MOVE_CANCEL_RACE = 1,
1312 @@ -128,6 +129,7 @@ enum {
1313 MOVE_TARGET_RD_ERR,
1314 MOVE_TARGET_WR_ERR,
1315 MOVE_CANCEL_BITFLIPS,
1316 + MOVE_RETRY,
1317 };
1318
1319 /**
1320 diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
1321 index ff2c495..12e44c9 100644
1322 --- a/drivers/mtd/ubi/wl.c
1323 +++ b/drivers/mtd/ubi/wl.c
1324 @@ -792,7 +792,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1325 protect = 1;
1326 goto out_not_moved;
1327 }
1328 -
1329 + if (err == MOVE_RETRY) {
1330 + scrubbing = 1;
1331 + goto out_not_moved;
1332 + }
1333 if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
1334 err == MOVE_TARGET_RD_ERR) {
1335 /*
1336 @@ -1046,7 +1049,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1337
1338 ubi_err("failed to erase PEB %d, error %d", pnum, err);
1339 kfree(wl_wrk);
1340 - kmem_cache_free(ubi_wl_entry_slab, e);
1341
1342 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1343 err == -EBUSY) {
1344 @@ -1059,14 +1061,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1345 goto out_ro;
1346 }
1347 return err;
1348 - } else if (err != -EIO) {
1349 + }
1350 +
1351 + kmem_cache_free(ubi_wl_entry_slab, e);
1352 + if (err != -EIO)
1353 /*
1354 * If this is not %-EIO, we have no idea what to do. Scheduling
1355 * this physical eraseblock for erasure again would cause
1356 * errors again and again. Well, lets switch to R/O mode.
1357 */
1358 goto out_ro;
1359 - }
1360
1361 /* It is %-EIO, the PEB went bad */
1362
1363 diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
1364 index 47c8339a..2843c90 100644
1365 --- a/drivers/net/phy/mdio-gpio.c
1366 +++ b/drivers/net/phy/mdio-gpio.c
1367 @@ -241,7 +241,7 @@ MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
1368
1369 static struct platform_driver mdio_ofgpio_driver = {
1370 .driver = {
1371 - .name = "mdio-gpio",
1372 + .name = "mdio-ofgpio",
1373 .owner = THIS_MODULE,
1374 .of_match_table = mdio_ofgpio_match,
1375 },
1376 diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
1377 index 421d5c8..a935585 100644
1378 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
1379 +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
1380 @@ -2910,14 +2910,13 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1381 IWL_WARN(priv, "Invalid scan band\n");
1382 return -EIO;
1383 }
1384 -
1385 /*
1386 - * If active scaning is requested but a certain channel
1387 - * is marked passive, we can do active scanning if we
1388 - * detect transmissions.
1389 + * If active scaning is requested but a certain channel is marked
1390 + * passive, we can do active scanning if we detect transmissions. For
1391 + * passive only scanning disable switching to active on any channel.
1392 */
1393 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
1394 - IWL_GOOD_CRC_TH_DISABLED;
1395 + IWL_GOOD_CRC_TH_NEVER;
1396
1397 if (!priv->is_internal_short_scan) {
1398 scan->tx_cmd.len = cpu_to_le16(
1399 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
1400 index f803fb6..857cf61 100644
1401 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
1402 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
1403 @@ -2023,6 +2023,7 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
1404 case IEEE80211_SMPS_STATIC:
1405 case IEEE80211_SMPS_DYNAMIC:
1406 return IWL_NUM_IDLE_CHAINS_SINGLE;
1407 + case IEEE80211_SMPS_AUTOMATIC:
1408 case IEEE80211_SMPS_OFF:
1409 return active_cnt;
1410 default:
1411 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
1412 index 39a3c9c..272bcdf 100644
1413 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
1414 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
1415 @@ -442,6 +442,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
1416
1417 mutex_lock(&priv->mutex);
1418
1419 + if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1420 + goto out;
1421 +
1422 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
1423 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
1424 goto out;
1425 diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
1426 index 55cd3e1..dab7dc1 100644
1427 --- a/drivers/net/wireless/rt2x00/rt2800pci.c
1428 +++ b/drivers/net/wireless/rt2x00/rt2800pci.c
1429 @@ -426,7 +426,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
1430 static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1431 enum dev_state state)
1432 {
1433 - int mask = (state == STATE_RADIO_IRQ_ON);
1434 u32 reg;
1435 unsigned long flags;
1436
1437 @@ -448,25 +447,14 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1438 }
1439
1440 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
1441 - rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
1442 - rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
1443 - rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
1444 - rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
1445 - rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, 0);
1446 - rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, 0);
1447 - rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, 0);
1448 - rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, 0);
1449 - rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, 0);
1450 - rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, 0);
1451 - rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, 0);
1452 - rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, 0);
1453 - rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
1454 - rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
1455 - rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
1456 - rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
1457 - rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0);
1458 - rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
1459 - rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
1460 + reg = 0;
1461 + if (state == STATE_RADIO_IRQ_ON) {
1462 + rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
1463 + rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
1464 + rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
1465 + rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
1466 + rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
1467 + }
1468 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
1469 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
1470
1471 diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
1472 index 3b5af01..0c77a14 100644
1473 --- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
1474 +++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
1475 @@ -196,6 +196,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
1476 /* Allocate skb buffer to contain firmware */
1477 /* info and tx descriptor info. */
1478 skb = dev_alloc_skb(frag_length);
1479 + if (!skb)
1480 + return false;
1481 skb_reserve(skb, extra_descoffset);
1482 seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length -
1483 extra_descoffset));
1484 @@ -575,6 +577,8 @@ static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd,
1485
1486 len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len);
1487 skb = dev_alloc_skb(len);
1488 + if (!skb)
1489 + return false;
1490 cb_desc = (struct rtl_tcb_desc *)(skb->cb);
1491 cb_desc->queue_index = TXCMD_QUEUE;
1492 cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL;
1493 diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
1494 index 2f10328..e174982 100644
1495 --- a/drivers/pci/msi.c
1496 +++ b/drivers/pci/msi.c
1497 @@ -869,5 +869,15 @@ EXPORT_SYMBOL(pci_msi_enabled);
1498
1499 void pci_msi_init_pci_dev(struct pci_dev *dev)
1500 {
1501 + int pos;
1502 INIT_LIST_HEAD(&dev->msi_list);
1503 +
1504 + /* Disable the msi hardware to avoid screaming interrupts
1505 + * during boot. This is the power on reset default so
1506 + * usually this should be a noop.
1507 + */
1508 + pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
1509 + if (pos)
1510 + msi_set_enable(dev, pos, 0);
1511 + msix_set_enable(dev, 0);
1512 }
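The msi.c hunk above clears any MSI/MSI-X enable bits left set by firmware while the device is being set up, so a stale message-signalled interrupt configuration cannot fire repeatedly ("scream") before a driver takes ownership. A minimal standalone sketch of the bit manipulation behind that idea, using illustrative constants that mirror PCI_MSI_FLAGS_ENABLE and PCI_MSIX_FLAGS_ENABLE (not the kernel's msi_set_enable()/msix_set_enable() implementation):

#include <stdint.h>
#include <stdio.h>

#define MSI_FLAGS_ENABLE   0x0001u  /* bit 0 of the MSI Message Control word   */
#define MSIX_FLAGS_ENABLE  0x8000u  /* bit 15 of the MSI-X Message Control word */

/* Return the Message Control value to write back so the function
 * stops generating MSI interrupts. */
static uint16_t msi_control_off(uint16_t control)
{
	return control & (uint16_t)~MSI_FLAGS_ENABLE;
}

/* Same idea for MSI-X. */
static uint16_t msix_control_off(uint16_t control)
{
	return control & (uint16_t)~MSIX_FLAGS_ENABLE;
}

int main(void)
{
	uint16_t msi_ctrl  = 0x0081;  /* example: firmware left MSI enabled   */
	uint16_t msix_ctrl = 0x8003;  /* example: firmware left MSI-X enabled */

	printf("MSI   control 0x%04x -> 0x%04x\n", msi_ctrl, msi_control_off(msi_ctrl));
	printf("MSI-X control 0x%04x -> 0x%04x\n", msix_ctrl, msix_control_off(msix_ctrl));
	return 0;
}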
1513 diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
1514 index dfbd5a6..258fef2 100644
1515 --- a/drivers/pnp/quirks.c
1516 +++ b/drivers/pnp/quirks.c
1517 @@ -295,6 +295,45 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
1518 }
1519 }
1520
1521 +#ifdef CONFIG_AMD_NB
1522 +
1523 +#include <asm/amd_nb.h>
1524 +
1525 +static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
1526 +{
1527 + resource_size_t start, end;
1528 + struct pnp_resource *pnp_res;
1529 + struct resource *res;
1530 + struct resource mmconfig_res, *mmconfig;
1531 +
1532 + mmconfig = amd_get_mmconfig_range(&mmconfig_res);
1533 + if (!mmconfig)
1534 + return;
1535 +
1536 + list_for_each_entry(pnp_res, &dev->resources, list) {
1537 + res = &pnp_res->res;
1538 + if (res->end < mmconfig->start || res->start > mmconfig->end ||
1539 + (res->start == mmconfig->start && res->end == mmconfig->end))
1540 + continue;
1541 +
1542 + dev_info(&dev->dev, FW_BUG
1543 + "%pR covers only part of AMD MMCONFIG area %pR; adding more reservations\n",
1544 + res, mmconfig);
1545 + if (mmconfig->start < res->start) {
1546 + start = mmconfig->start;
1547 + end = res->start - 1;
1548 + pnp_add_mem_resource(dev, start, end, 0);
1549 + }
1550 + if (mmconfig->end > res->end) {
1551 + start = res->end + 1;
1552 + end = mmconfig->end;
1553 + pnp_add_mem_resource(dev, start, end, 0);
1554 + }
1555 + break;
1556 + }
1557 +}
1558 +#endif
1559 +
1560 /*
1561 * PnP Quirks
1562 * Cards or devices that need some tweaking due to incomplete resource info
1563 @@ -322,6 +361,9 @@ static struct pnp_fixup pnp_fixups[] = {
1564 /* PnP resources that might overlap PCI BARs */
1565 {"PNP0c01", quirk_system_pci_resources},
1566 {"PNP0c02", quirk_system_pci_resources},
1567 +#ifdef CONFIG_AMD_NB
1568 + {"PNP0c01", quirk_amd_mmconfig_area},
1569 +#endif
1570 {""}
1571 };
1572
1573 diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
1574 index eb4c883..38d1dc7 100644
1575 --- a/drivers/rtc/interface.c
1576 +++ b/drivers/rtc/interface.c
1577 @@ -227,11 +227,11 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
1578 alarm->time.tm_hour = now.tm_hour;
1579
1580 /* For simplicity, only support date rollover for now */
1581 - if (alarm->time.tm_mday == -1) {
1582 + if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
1583 alarm->time.tm_mday = now.tm_mday;
1584 missing = day;
1585 }
1586 - if (alarm->time.tm_mon == -1) {
1587 + if ((unsigned)alarm->time.tm_mon >= 12) {
1588 alarm->time.tm_mon = now.tm_mon;
1589 if (missing == none)
1590 missing = month;
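The interface.c hunk above stops assuming that drivers report an unset alarm day or month only as -1 and instead treats any out-of-range value as missing. A small standalone sketch of that range check, assuming struct tm-style fields (day 1-31, month 0-11); illustrative only, not the kernel code:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Treat anything outside the legal range as "driver did not fill this in",
 * mirroring the tm_mday and tm_mon checks used by __rtc_read_alarm(). */
static bool alarm_day_missing(const struct tm *t)
{
	return t->tm_mday < 1 || t->tm_mday > 31;
}

static bool alarm_month_missing(const struct tm *t)
{
	return (unsigned int)t->tm_mon >= 12;
}

int main(void)
{
	struct tm alarm = { .tm_mday = -1, .tm_mon = 0xff };

	printf("day missing: %d, month missing: %d\n",
	       alarm_day_missing(&alarm), alarm_month_missing(&alarm));
	return 0;
}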
1591 diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
1592 index 39e81cd..10f16a3 100644
1593 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c
1594 +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
1595 @@ -66,6 +66,8 @@ static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
1596
1597 #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
1598
1599 +#define MAX_HBA_QUEUE_DEPTH 30000
1600 +#define MAX_CHAIN_DEPTH 100000
1601 static int max_queue_depth = -1;
1602 module_param(max_queue_depth, int, 0);
1603 MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
1604 @@ -2098,8 +2100,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
1605 }
1606 if (ioc->chain_dma_pool)
1607 pci_pool_destroy(ioc->chain_dma_pool);
1608 - }
1609 - if (ioc->chain_lookup) {
1610 free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
1611 ioc->chain_lookup = NULL;
1612 }
1613 @@ -2117,9 +2117,7 @@ static int
1614 _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
1615 {
1616 struct mpt2sas_facts *facts;
1617 - u32 queue_size, queue_diff;
1618 u16 max_sge_elements;
1619 - u16 num_of_reply_frames;
1620 u16 chains_needed_per_io;
1621 u32 sz, total_sz;
1622 u32 retry_sz;
1623 @@ -2146,7 +2144,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
1624 max_request_credit = (max_queue_depth < facts->RequestCredit)
1625 ? max_queue_depth : facts->RequestCredit;
1626 else
1627 - max_request_credit = facts->RequestCredit;
1628 + max_request_credit = min_t(u16, facts->RequestCredit,
1629 + MAX_HBA_QUEUE_DEPTH);
1630
1631 ioc->hba_queue_depth = max_request_credit;
1632 ioc->hi_priority_depth = facts->HighPriorityCredit;
1633 @@ -2187,50 +2186,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
1634 }
1635 ioc->chains_needed_per_io = chains_needed_per_io;
1636
1637 - /* reply free queue sizing - taking into account for events */
1638 - num_of_reply_frames = ioc->hba_queue_depth + 32;
1639 -
1640 - /* number of replies frames can't be a multiple of 16 */
1641 - /* decrease number of reply frames by 1 */
1642 - if (!(num_of_reply_frames % 16))
1643 - num_of_reply_frames--;
1644 -
1645 - /* calculate number of reply free queue entries
1646 - * (must be multiple of 16)
1647 - */
1648 -
1649 - /* (we know reply_free_queue_depth is not a multiple of 16) */
1650 - queue_size = num_of_reply_frames;
1651 - queue_size += 16 - (queue_size % 16);
1652 - ioc->reply_free_queue_depth = queue_size;
1653 -
1654 - /* reply descriptor post queue sizing */
1655 - /* this size should be the number of request frames + number of reply
1656 - * frames
1657 - */
1658 -
1659 - queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
1660 - /* round up to 16 byte boundary */
1661 - if (queue_size % 16)
1662 - queue_size += 16 - (queue_size % 16);
1663 -
1664 - /* check against IOC maximum reply post queue depth */
1665 - if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
1666 - queue_diff = queue_size -
1667 - facts->MaxReplyDescriptorPostQueueDepth;
1668 + /* reply free queue sizing - taking into account for 64 FW events */
1669 + ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
1670
1671 - /* round queue_diff up to multiple of 16 */
1672 - if (queue_diff % 16)
1673 - queue_diff += 16 - (queue_diff % 16);
1674 -
1675 - /* adjust hba_queue_depth, reply_free_queue_depth,
1676 - * and queue_size
1677 - */
1678 - ioc->hba_queue_depth -= (queue_diff / 2);
1679 - ioc->reply_free_queue_depth -= (queue_diff / 2);
1680 - queue_size = facts->MaxReplyDescriptorPostQueueDepth;
1681 + /* align the reply post queue on the next 16 count boundary */
1682 + if (!ioc->reply_free_queue_depth % 16)
1683 + ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
1684 + else
1685 + ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
1686 + 32 - (ioc->reply_free_queue_depth % 16);
1687 + if (ioc->reply_post_queue_depth >
1688 + facts->MaxReplyDescriptorPostQueueDepth) {
1689 + ioc->reply_post_queue_depth = min_t(u16,
1690 + (facts->MaxReplyDescriptorPostQueueDepth -
1691 + (facts->MaxReplyDescriptorPostQueueDepth % 16)),
1692 + (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
1693 + ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
1694 + ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
1695 }
1696 - ioc->reply_post_queue_depth = queue_size;
1697 +
1698
1699 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
1700 "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
1701 @@ -2316,15 +2290,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
1702 "depth(%d)\n", ioc->name, ioc->request,
1703 ioc->scsiio_depth));
1704
1705 - /* loop till the allocation succeeds */
1706 - do {
1707 - sz = ioc->chain_depth * sizeof(struct chain_tracker);
1708 - ioc->chain_pages = get_order(sz);
1709 - ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
1710 - GFP_KERNEL, ioc->chain_pages);
1711 - if (ioc->chain_lookup == NULL)
1712 - ioc->chain_depth -= 100;
1713 - } while (ioc->chain_lookup == NULL);
1714 + ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
1715 + sz = ioc->chain_depth * sizeof(struct chain_tracker);
1716 + ioc->chain_pages = get_order(sz);
1717 +
1718 + ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
1719 + GFP_KERNEL, ioc->chain_pages);
1720 ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
1721 ioc->request_sz, 16, 0);
1722 if (!ioc->chain_dma_pool) {
1723 diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
1724 index c79857e..aa51195 100644
1725 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
1726 +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
1727 @@ -974,8 +974,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1728 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1729 if (list_empty(&ioc->free_chain_list)) {
1730 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1731 - printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
1732 - ioc->name);
1733 + dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
1734 + "available\n", ioc->name));
1735 return NULL;
1736 }
1737 chain_req = list_entry(ioc->free_chain_list.next,
1738 @@ -6425,6 +6425,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
1739 } else
1740 sas_target_priv_data = NULL;
1741 raid_device->responding = 1;
1742 + spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1743 starget_printk(KERN_INFO, raid_device->starget,
1744 "handle(0x%04x), wwid(0x%016llx)\n", handle,
1745 (unsigned long long)raid_device->wwid);
1746 @@ -6435,16 +6436,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
1747 */
1748 _scsih_init_warpdrive_properties(ioc, raid_device);
1749 if (raid_device->handle == handle)
1750 - goto out;
1751 + return;
1752 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
1753 raid_device->handle);
1754 raid_device->handle = handle;
1755 if (sas_target_priv_data)
1756 sas_target_priv_data->handle = handle;
1757 - goto out;
1758 + return;
1759 }
1760 }
1761 - out:
1762 +
1763 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1764 }
1765
1766 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1767 index 953773c..7d8b5d8 100644
1768 --- a/drivers/scsi/sd.c
1769 +++ b/drivers/scsi/sd.c
1770 @@ -1073,6 +1073,10 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
1771 SCSI_LOG_IOCTL(1, printk("sd_ioctl: disk=%s, cmd=0x%x\n",
1772 disk->disk_name, cmd));
1773
1774 + error = scsi_verify_blk_ioctl(bdev, cmd);
1775 + if (error < 0)
1776 + return error;
1777 +
1778 /*
1779 * If we are in the middle of error recovery, don't let anyone
1780 * else try and use this device. Also, if error recovery fails, it
1781 @@ -1095,7 +1099,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
1782 error = scsi_ioctl(sdp, cmd, p);
1783 break;
1784 default:
1785 - error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
1786 + error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
1787 if (error != -ENOTTY)
1788 break;
1789 error = scsi_ioctl(sdp, cmd, p);
1790 @@ -1265,6 +1269,11 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
1791 unsigned int cmd, unsigned long arg)
1792 {
1793 struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
1794 + int ret;
1795 +
1796 + ret = scsi_verify_blk_ioctl(bdev, cmd);
1797 + if (ret < 0)
1798 + return -ENOIOCTLCMD;
1799
1800 /*
1801 * If we are in the middle of error recovery, don't let anyone
1802 @@ -1276,8 +1285,6 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
1803 return -ENODEV;
1804
1805 if (sdev->host->hostt->compat_ioctl) {
1806 - int ret;
1807 -
1808 ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
1809
1810 return ret;
1811 diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
1812 index b4543f5..36d1ed7 100644
1813 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
1814 +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
1815 @@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
1816 struct sym_lcb *lp = sym_lp(tp, sdev->lun);
1817 unsigned long flags;
1818
1819 + /* if slave_alloc returned before allocating a sym_lcb, return */
1820 + if (!lp)
1821 + return;
1822 +
1823 spin_lock_irqsave(np->s.host->host_lock, flags);
1824
1825 if (lp->busy_itlq || lp->busy_itl) {
1826 diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
1827 index 7f19c8b..f044d45 100644
1828 --- a/drivers/target/target_core_cdb.c
1829 +++ b/drivers/target/target_core_cdb.c
1830 @@ -84,6 +84,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
1831 buf[2] = dev->transport->get_device_rev(dev);
1832
1833 /*
1834 + * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
1835 + *
1836 + * SPC4 says:
1837 + * A RESPONSE DATA FORMAT field set to 2h indicates that the
1838 + * standard INQUIRY data is in the format defined in this
1839 + * standard. Response data format values less than 2h are
1840 + * obsolete. Response data format values greater than 2h are
1841 + * reserved.
1842 + */
1843 + buf[3] = 2;
1844 +
1845 + /*
1846 * Enable SCCS and TPGS fields for Emulated ALUA
1847 */
1848 if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED)
1849 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
1850 index 1340ffd..bb86655 100644
1851 --- a/drivers/target/target_core_transport.c
1852 +++ b/drivers/target/target_core_transport.c
1853 @@ -5668,6 +5668,8 @@ int transport_send_check_condition_and_sense(
1854 case TCM_SECTOR_COUNT_TOO_MANY:
1855 /* CURRENT ERROR */
1856 buffer[offset] = 0x70;
1857 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
1858 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
1859 /* ILLEGAL REQUEST */
1860 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
1861 /* INVALID COMMAND OPERATION CODE */
1862 @@ -5676,6 +5678,7 @@ int transport_send_check_condition_and_sense(
1863 case TCM_UNKNOWN_MODE_PAGE:
1864 /* CURRENT ERROR */
1865 buffer[offset] = 0x70;
1866 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
1867 /* ILLEGAL REQUEST */
1868 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
1869 /* INVALID FIELD IN CDB */
1870 @@ -5684,6 +5687,7 @@ int transport_send_check_condition_and_sense(
1871 case TCM_CHECK_CONDITION_ABORT_CMD:
1872 /* CURRENT ERROR */
1873 buffer[offset] = 0x70;
1874 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
1875 /* ABORTED COMMAND */
1876 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
1877 /* BUS DEVICE RESET FUNCTION OCCURRED */
1878 @@ -5693,6 +5697,7 @@ int transport_send_check_condition_and_sense(
1879 case TCM_INCORRECT_AMOUNT_OF_DATA:
1880 /* CURRENT ERROR */
1881 buffer[offset] = 0x70;
1882 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
1883 /* ABORTED COMMAND */
1884 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
1885 /* WRITE ERROR */
1886 @@ -5703,6 +5708,7 @@ int transport_send_check_condition_and_sense(
1887 case TCM_INVALID_CDB_FIELD:
1888 /* CURRENT ERROR */
1889 buffer[offset] = 0x70;
1890 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
1891 /* ABORTED COMMAND */
1892 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
1893 /* INVALID FIELD IN CDB */
1894 @@ -5711,6 +5717,7 @@ int transport_send_check_condition_and_sense(
1895 case TCM_INVALID_PARAMETER_LIST:
1896 /* CURRENT ERROR */
1897 buffer[offset] = 0x70;
1898 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
1899 /* ABORTED COMMAND */
1900 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
1901 /* INVALID FIELD IN PARAMETER LIST */
1902 @@ -5719,6 +5726,7 @@ int transport_send_check_condition_and_sense(
1903 case TCM_UNEXPECTED_UNSOLICITED_DATA:
1904 /* CURRENT ERROR */
1905 buffer[offset] = 0x70;
1906 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
1907 /* ABORTED COMMAND */
1908 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
1909 /* WRITE ERROR */
1910 @@ -5729,6 +5737,7 @@ int transport_send_check_condition_and_sense(
1911 case TCM_SERVICE_CRC_ERROR:
1912 /* CURRENT ERROR */
1913 buffer[offset] = 0x70;
1914 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
1915 /* ABORTED COMMAND */
1916 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
1917 /* PROTOCOL SERVICE CRC ERROR */
1918 @@ -5739,6 +5748,7 @@ int transport_send_check_condition_and_sense(
1919 case TCM_SNACK_REJECTED:
1920 /* CURRENT ERROR */
1921 buffer[offset] = 0x70;
1922 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
1923 /* ABORTED COMMAND */
1924 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
1925 /* READ ERROR */
1926 @@ -5749,6 +5759,7 @@ int transport_send_check_condition_and_sense(
1927 case TCM_WRITE_PROTECTED:
1928 /* CURRENT ERROR */
1929 buffer[offset] = 0x70;
1930 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
1931 /* DATA PROTECT */
1932 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
1933 /* WRITE PROTECTED */
1934 @@ -5757,6 +5768,7 @@ int transport_send_check_condition_and_sense(
1935 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1936 /* CURRENT ERROR */
1937 buffer[offset] = 0x70;
1938 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
1939 /* UNIT ATTENTION */
1940 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
1941 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
1942 @@ -5766,6 +5778,7 @@ int transport_send_check_condition_and_sense(
1943 case TCM_CHECK_CONDITION_NOT_READY:
1944 /* CURRENT ERROR */
1945 buffer[offset] = 0x70;
1946 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
1947 /* Not Ready */
1948 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
1949 transport_get_sense_codes(cmd, &asc, &ascq);
1950 @@ -5776,6 +5789,7 @@ int transport_send_check_condition_and_sense(
1951 default:
1952 /* CURRENT ERROR */
1953 buffer[offset] = 0x70;
1954 + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
1955 /* ILLEGAL REQUEST */
1956 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
1957 /* LOGICAL UNIT COMMUNICATION FAILURE */
1958 diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
1959 index 5534690..daee5db 100644
1960 --- a/drivers/xen/xenbus/xenbus_xs.c
1961 +++ b/drivers/xen/xenbus/xenbus_xs.c
1962 @@ -801,6 +801,12 @@ static int process_msg(void)
1963 goto out;
1964 }
1965
1966 + if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
1967 + kfree(msg);
1968 + err = -EINVAL;
1969 + goto out;
1970 + }
1971 +
1972 body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
1973 if (body == NULL) {
1974 kfree(msg);
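The xenbus change above rejects a message whose advertised length exceeds the fixed xenstore payload limit before that length is used to size an allocation, closing off a path where a misbehaving peer could request a huge or overflowing buffer. A minimal standalone sketch of the same validate-before-allocate pattern, assuming a hypothetical header struct and the 4096-byte limit defined in xs_wire.h; not the kernel code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAYLOAD_MAX 4096u   /* mirrors XENSTORE_PAYLOAD_MAX */

struct msg_hdr {            /* hypothetical stand-in for the xenstore header */
	uint32_t type;
	uint32_t len;       /* length claimed by the other end */
};

/* Allocate a NUL-terminated buffer for the body, but only after the
 * claimed length has been checked against the protocol maximum. */
static char *alloc_body(const struct msg_hdr *hdr)
{
	if (hdr->len > PAYLOAD_MAX)
		return NULL;                    /* reject oversized/bogus lengths */
	return calloc(1, (size_t)hdr->len + 1);
}

int main(void)
{
	struct msg_hdr good = { .type = 1, .len = 16 };
	struct msg_hdr bad  = { .type = 1, .len = 0xffffffffu };

	char *b1 = alloc_body(&good);
	char *b2 = alloc_body(&bad);
	printf("good: %s, bad: %s\n", b1 ? "allocated" : "rejected",
	       b2 ? "allocated" : "rejected");
	free(b1);
	free(b2);
	return 0;
}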
1975 diff --git a/fs/dcache.c b/fs/dcache.c
1976 index d2f8feb..f598b98 100644
1977 --- a/fs/dcache.c
1978 +++ b/fs/dcache.c
1979 @@ -241,6 +241,7 @@ static void dentry_lru_add(struct dentry *dentry)
1980 static void __dentry_lru_del(struct dentry *dentry)
1981 {
1982 list_del_init(&dentry->d_lru);
1983 + dentry->d_flags &= ~DCACHE_SHRINK_LIST;
1984 dentry->d_sb->s_nr_dentry_unused--;
1985 dentry_stat.nr_unused--;
1986 }
1987 @@ -753,6 +754,7 @@ relock:
1988 spin_unlock(&dentry->d_lock);
1989 } else {
1990 list_move_tail(&dentry->d_lru, &tmp);
1991 + dentry->d_flags |= DCACHE_SHRINK_LIST;
1992 spin_unlock(&dentry->d_lock);
1993 if (!--cnt)
1994 break;
1995 @@ -1144,14 +1146,18 @@ resume:
1996 /*
1997 * move only zero ref count dentries to the end
1998 * of the unused list for prune_dcache
1999 + *
2000 + * Those which are presently on the shrink list, being processed
2001 + * by shrink_dentry_list(), shouldn't be moved. Otherwise the
2002 + * loop in shrink_dcache_parent() might not make any progress
2003 + * and loop forever.
2004 */
2005 - if (!dentry->d_count) {
2006 + if (dentry->d_count) {
2007 + dentry_lru_del(dentry);
2008 + } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
2009 dentry_lru_move_tail(dentry);
2010 found++;
2011 - } else {
2012 - dentry_lru_del(dentry);
2013 }
2014 -
2015 /*
2016 * We can return to the caller if we have found some (this
2017 * ensures forward progress). We'll be coming back to find
2018 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2019 index 7aa77f0..df121b2 100644
2020 --- a/fs/ext4/super.c
2021 +++ b/fs/ext4/super.c
2022 @@ -1957,17 +1957,16 @@ static int ext4_fill_flex_info(struct super_block *sb)
2023 struct ext4_group_desc *gdp = NULL;
2024 ext4_group_t flex_group_count;
2025 ext4_group_t flex_group;
2026 - int groups_per_flex = 0;
2027 + unsigned int groups_per_flex = 0;
2028 size_t size;
2029 int i;
2030
2031 sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
2032 - groups_per_flex = 1 << sbi->s_log_groups_per_flex;
2033 -
2034 - if (groups_per_flex < 2) {
2035 + if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
2036 sbi->s_log_groups_per_flex = 0;
2037 return 1;
2038 }
2039 + groups_per_flex = 1 << sbi->s_log_groups_per_flex;
2040
2041 /* We allocate both existing and potentially added groups */
2042 flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
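The ext4 hunk above validates s_log_groups_per_flex before using it as a shift count: with the old ordering, a corrupted superblock value of 32 or more made "1 << log" undefined on a 32-bit type. A small standalone sketch of the guarded shift, using plain unsigned int; illustrative only:

#include <stdio.h>

/* Return the flex group size, or 0 if the on-disk exponent is unusable.
 * Shifting a 32-bit 1 by 32 or more is undefined behaviour, so the range
 * check must come before the shift, as in ext4_fill_flex_info() above. */
static unsigned int groups_per_flex(unsigned int log_groups_per_flex)
{
	if (log_groups_per_flex < 1 || log_groups_per_flex > 31)
		return 0;                       /* treat as "flex_bg disabled" */
	return 1u << log_groups_per_flex;
}

int main(void)
{
	printf("log=4  -> %u\n", groups_per_flex(4));    /* 16 */
	printf("log=36 -> %u\n", groups_per_flex(36));   /* rejected: 0 */
	return 0;
}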
2043 diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
2044 index aaa09e9..b5c826e 100644
2045 --- a/fs/nfs/callback_proc.c
2046 +++ b/fs/nfs/callback_proc.c
2047 @@ -324,7 +324,7 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
2048 dprintk("%s enter. slotid %d seqid %d\n",
2049 __func__, args->csa_slotid, args->csa_sequenceid);
2050
2051 - if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS)
2052 + if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
2053 return htonl(NFS4ERR_BADSLOT);
2054
2055 slot = tbl->slots + args->csa_slotid;
2056 diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
2057 index 1d1dc1e..75fe694 100644
2058 --- a/fs/nfs/objlayout/objio_osd.c
2059 +++ b/fs/nfs/objlayout/objio_osd.c
2060 @@ -1006,7 +1006,8 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
2061 static struct pnfs_layoutdriver_type objlayout_type = {
2062 .id = LAYOUT_OSD2_OBJECTS,
2063 .name = "LAYOUT_OSD2_OBJECTS",
2064 - .flags = PNFS_LAYOUTRET_ON_SETATTR,
2065 + .flags = PNFS_LAYOUTRET_ON_SETATTR |
2066 + PNFS_LAYOUTRET_ON_ERROR,
2067
2068 .alloc_layout_hdr = objlayout_alloc_layout_hdr,
2069 .free_layout_hdr = objlayout_free_layout_hdr,
2070 diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
2071 index 1d06f8e..fefa122 100644
2072 --- a/fs/nfs/objlayout/objlayout.c
2073 +++ b/fs/nfs/objlayout/objlayout.c
2074 @@ -294,9 +294,11 @@ objlayout_read_done(struct objlayout_io_state *state, ssize_t status, bool sync)
2075 dprintk("%s: Begin status=%zd eof=%d\n", __func__, status, eof);
2076 rdata = state->rpcdata;
2077 rdata->task.tk_status = status;
2078 - if (status >= 0) {
2079 + if (likely(status >= 0)) {
2080 rdata->res.count = status;
2081 rdata->res.eof = eof;
2082 + } else {
2083 + rdata->pnfs_error = status;
2084 }
2085 objlayout_iodone(state);
2086 /* must not use state after this point */
2087 @@ -380,15 +382,17 @@ objlayout_write_done(struct objlayout_io_state *state, ssize_t status,
2088 wdata = state->rpcdata;
2089 state->status = status;
2090 wdata->task.tk_status = status;
2091 - if (status >= 0) {
2092 + if (likely(status >= 0)) {
2093 wdata->res.count = status;
2094 wdata->verf.committed = state->committed;
2095 dprintk("%s: Return status %d committed %d\n",
2096 __func__, wdata->task.tk_status,
2097 wdata->verf.committed);
2098 - } else
2099 + } else {
2100 + wdata->pnfs_error = status;
2101 dprintk("%s: Return status %d\n",
2102 __func__, wdata->task.tk_status);
2103 + }
2104 objlayout_iodone(state);
2105 /* must not use state after this point */
2106
2107 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
2108 index 36d2a29..9951887 100644
2109 --- a/fs/nfs/pnfs.c
2110 +++ b/fs/nfs/pnfs.c
2111 @@ -1119,6 +1119,14 @@ pnfs_ld_write_done(struct nfs_write_data *data)
2112 data->mds_ops->rpc_release(data);
2113 return 0;
2114 }
2115 + if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
2116 + PNFS_LAYOUTRET_ON_ERROR) {
2117 + /* Don't lo_commit on error, the server will need to
2118 + * perform a file recovery.
2119 + */

2120 + clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(data->inode)->flags);
2121 + pnfs_return_layout(data->inode);
2122 + }
2123
2124 dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
2125 data->pnfs_error);
2126 @@ -1167,6 +1175,10 @@ pnfs_ld_read_done(struct nfs_read_data *data)
2127 return 0;
2128 }
2129
2130 + if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
2131 + PNFS_LAYOUTRET_ON_ERROR)
2132 + pnfs_return_layout(data->inode);
2133 +
2134 dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
2135 data->pnfs_error);
2136 status = nfs_initiate_read(data, NFS_CLIENT(data->inode),
2137 diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
2138 index 9d147d9..bb8b324 100644
2139 --- a/fs/nfs/pnfs.h
2140 +++ b/fs/nfs/pnfs.h
2141 @@ -68,6 +68,7 @@ enum {
2142 enum layoutdriver_policy_flags {
2143 /* Should the pNFS client commit and return the layout upon a setattr */
2144 PNFS_LAYOUTRET_ON_SETATTR = 1 << 0,
2145 + PNFS_LAYOUTRET_ON_ERROR = 1 << 1,
2146 };
2147
2148 struct nfs4_deviceid_node;
2149 diff --git a/fs/nfs/super.c b/fs/nfs/super.c
2150 index 858d31b..7e8b07d 100644
2151 --- a/fs/nfs/super.c
2152 +++ b/fs/nfs/super.c
2153 @@ -904,10 +904,24 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
2154 data->auth_flavor_len = 1;
2155 data->version = version;
2156 data->minorversion = 0;
2157 + security_init_mnt_opts(&data->lsm_opts);
2158 }
2159 return data;
2160 }
2161
2162 +static void nfs_free_parsed_mount_data(struct nfs_parsed_mount_data *data)
2163 +{
2164 + if (data) {
2165 + kfree(data->client_address);
2166 + kfree(data->mount_server.hostname);
2167 + kfree(data->nfs_server.export_path);
2168 + kfree(data->nfs_server.hostname);
2169 + kfree(data->fscache_uniq);
2170 + security_free_mnt_opts(&data->lsm_opts);
2171 + kfree(data);
2172 + }
2173 +}
2174 +
2175 /*
2176 * Sanity-check a server address provided by the mount command.
2177 *
2178 @@ -2218,9 +2232,7 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
2179 data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
2180 mntfh = nfs_alloc_fhandle();
2181 if (data == NULL || mntfh == NULL)
2182 - goto out_free_fh;
2183 -
2184 - security_init_mnt_opts(&data->lsm_opts);
2185 + goto out;
2186
2187 /* Validate the mount data */
2188 error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
2189 @@ -2232,8 +2244,6 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
2190 #ifdef CONFIG_NFS_V4
2191 if (data->version == 4) {
2192 mntroot = nfs4_try_mount(flags, dev_name, data);
2193 - kfree(data->client_address);
2194 - kfree(data->nfs_server.export_path);
2195 goto out;
2196 }
2197 #endif /* CONFIG_NFS_V4 */
2198 @@ -2284,13 +2294,8 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
2199 s->s_flags |= MS_ACTIVE;
2200
2201 out:
2202 - kfree(data->nfs_server.hostname);
2203 - kfree(data->mount_server.hostname);
2204 - kfree(data->fscache_uniq);
2205 - security_free_mnt_opts(&data->lsm_opts);
2206 -out_free_fh:
2207 + nfs_free_parsed_mount_data(data);
2208 nfs_free_fhandle(mntfh);
2209 - kfree(data);
2210 return mntroot;
2211
2212 out_err_nosb:
2213 @@ -2613,9 +2618,7 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
2214
2215 mntfh = nfs_alloc_fhandle();
2216 if (data == NULL || mntfh == NULL)
2217 - goto out_free_fh;
2218 -
2219 - security_init_mnt_opts(&data->lsm_opts);
2220 + goto out;
2221
2222 /* Get a volume representation */
2223 server = nfs4_create_server(data, mntfh);
2224 @@ -2663,13 +2666,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
2225
2226 s->s_flags |= MS_ACTIVE;
2227
2228 - security_free_mnt_opts(&data->lsm_opts);
2229 nfs_free_fhandle(mntfh);
2230 return mntroot;
2231
2232 out:
2233 - security_free_mnt_opts(&data->lsm_opts);
2234 -out_free_fh:
2235 nfs_free_fhandle(mntfh);
2236 return ERR_PTR(error);
2237
2238 @@ -2855,7 +2855,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
2239
2240 data = nfs_alloc_parsed_mount_data(4);
2241 if (data == NULL)
2242 - goto out_free_data;
2243 + goto out;
2244
2245 /* Validate the mount data */
2246 error = nfs4_validate_mount_data(raw_data, data, dev_name);
2247 @@ -2869,12 +2869,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
2248 error = PTR_ERR(res);
2249
2250 out:
2251 - kfree(data->client_address);
2252 - kfree(data->nfs_server.export_path);
2253 - kfree(data->nfs_server.hostname);
2254 - kfree(data->fscache_uniq);
2255 -out_free_data:
2256 - kfree(data);
2257 + nfs_free_parsed_mount_data(data);
2258 dprintk("<-- nfs4_mount() = %d%s\n", error,
2259 error != 0 ? " [error]" : "");
2260 return res;
2261 diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
2262 index b9566e4..4b470f6 100644
2263 --- a/fs/nfsd/export.c
2264 +++ b/fs/nfsd/export.c
2265 @@ -88,7 +88,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
2266 struct svc_expkey key;
2267 struct svc_expkey *ek = NULL;
2268
2269 - if (mesg[mlen-1] != '\n')
2270 + if (mlen < 1 || mesg[mlen-1] != '\n')
2271 return -EINVAL;
2272 mesg[mlen-1] = 0;
2273
2274 diff --git a/fs/notify/mark.c b/fs/notify/mark.c
2275 index 252ab1f..42ed195 100644
2276 --- a/fs/notify/mark.c
2277 +++ b/fs/notify/mark.c
2278 @@ -135,9 +135,6 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
2279
2280 mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
2281
2282 - /* 1 from caller and 1 for being on i_list/g_list */
2283 - BUG_ON(atomic_read(&mark->refcnt) < 2);
2284 -
2285 spin_lock(&group->mark_lock);
2286
2287 if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
2288 @@ -182,6 +179,11 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
2289 iput(inode);
2290
2291 /*
2292 + * We don't necessarily have a ref on mark from caller so the above iput
2293 + * may have already destroyed it. Don't touch from now on.
2294 + */
2295 +
2296 + /*
2297 * it's possible that this group tried to destroy itself, but this
2298 * this mark was simultaneously being freed by inode. If that's the
2299 * case, we finish freeing the group here.
2300 diff --git a/fs/proc/base.c b/fs/proc/base.c
2301 index f039017..7b28f27 100644
2302 --- a/fs/proc/base.c
2303 +++ b/fs/proc/base.c
2304 @@ -194,65 +194,7 @@ static int proc_root_link(struct inode *inode, struct path *path)
2305 return result;
2306 }
2307
2308 -static struct mm_struct *__check_mem_permission(struct task_struct *task)
2309 -{
2310 - struct mm_struct *mm;
2311 -
2312 - mm = get_task_mm(task);
2313 - if (!mm)
2314 - return ERR_PTR(-EINVAL);
2315 -
2316 - /*
2317 - * A task can always look at itself, in case it chooses
2318 - * to use system calls instead of load instructions.
2319 - */
2320 - if (task == current)
2321 - return mm;
2322 -
2323 - /*
2324 - * If current is actively ptrace'ing, and would also be
2325 - * permitted to freshly attach with ptrace now, permit it.
2326 - */
2327 - if (task_is_stopped_or_traced(task)) {
2328 - int match;
2329 - rcu_read_lock();
2330 - match = (tracehook_tracer_task(task) == current);
2331 - rcu_read_unlock();
2332 - if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
2333 - return mm;
2334 - }
2335 -
2336 - /*
2337 - * No one else is allowed.
2338 - */
2339 - mmput(mm);
2340 - return ERR_PTR(-EPERM);
2341 -}
2342 -
2343 -/*
2344 - * If current may access user memory in @task return a reference to the
2345 - * corresponding mm, otherwise ERR_PTR.
2346 - */
2347 -static struct mm_struct *check_mem_permission(struct task_struct *task)
2348 -{
2349 - struct mm_struct *mm;
2350 - int err;
2351 -
2352 - /*
2353 - * Avoid racing if task exec's as we might get a new mm but validate
2354 - * against old credentials.
2355 - */
2356 - err = mutex_lock_killable(&task->signal->cred_guard_mutex);
2357 - if (err)
2358 - return ERR_PTR(err);
2359 -
2360 - mm = __check_mem_permission(task);
2361 - mutex_unlock(&task->signal->cred_guard_mutex);
2362 -
2363 - return mm;
2364 -}
2365 -
2366 -struct mm_struct *mm_for_maps(struct task_struct *task)
2367 +static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
2368 {
2369 struct mm_struct *mm;
2370 int err;
2371 @@ -263,7 +205,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
2372
2373 mm = get_task_mm(task);
2374 if (mm && mm != current->mm &&
2375 - !ptrace_may_access(task, PTRACE_MODE_READ)) {
2376 + !ptrace_may_access(task, mode)) {
2377 mmput(mm);
2378 mm = ERR_PTR(-EACCES);
2379 }
2380 @@ -272,6 +214,11 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
2381 return mm;
2382 }
2383
2384 +struct mm_struct *mm_for_maps(struct task_struct *task)
2385 +{
2386 + return mm_access(task, PTRACE_MODE_READ);
2387 +}
2388 +
2389 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
2390 {
2391 int res = 0;
2392 @@ -816,38 +763,39 @@ static const struct file_operations proc_single_file_operations = {
2393
2394 static int mem_open(struct inode* inode, struct file* file)
2395 {
2396 - file->private_data = (void*)((long)current->self_exec_id);
2397 + struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
2398 + struct mm_struct *mm;
2399 +
2400 + if (!task)
2401 + return -ESRCH;
2402 +
2403 + mm = mm_access(task, PTRACE_MODE_ATTACH);
2404 + put_task_struct(task);
2405 +
2406 + if (IS_ERR(mm))
2407 + return PTR_ERR(mm);
2408 +
2409 /* OK to pass negative loff_t, we can catch out-of-range */
2410 file->f_mode |= FMODE_UNSIGNED_OFFSET;
2411 + file->private_data = mm;
2412 +
2413 return 0;
2414 }
2415
2416 static ssize_t mem_read(struct file * file, char __user * buf,
2417 size_t count, loff_t *ppos)
2418 {
2419 - struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
2420 + int ret;
2421 char *page;
2422 unsigned long src = *ppos;
2423 - int ret = -ESRCH;
2424 - struct mm_struct *mm;
2425 + struct mm_struct *mm = file->private_data;
2426
2427 - if (!task)
2428 - goto out_no_task;
2429 + if (!mm)
2430 + return 0;
2431
2432 - ret = -ENOMEM;
2433 page = (char *)__get_free_page(GFP_TEMPORARY);
2434 if (!page)
2435 - goto out;
2436 -
2437 - mm = check_mem_permission(task);
2438 - ret = PTR_ERR(mm);
2439 - if (IS_ERR(mm))
2440 - goto out_free;
2441 -
2442 - ret = -EIO;
2443 -
2444 - if (file->private_data != (void*)((long)current->self_exec_id))
2445 - goto out_put;
2446 + return -ENOMEM;
2447
2448 ret = 0;
2449
2450 @@ -874,13 +822,7 @@ static ssize_t mem_read(struct file * file, char __user * buf,
2451 }
2452 *ppos = src;
2453
2454 -out_put:
2455 - mmput(mm);
2456 -out_free:
2457 free_page((unsigned long) page);
2458 -out:
2459 - put_task_struct(task);
2460 -out_no_task:
2461 return ret;
2462 }
2463
2464 @@ -889,27 +831,15 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
2465 {
2466 int copied;
2467 char *page;
2468 - struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
2469 unsigned long dst = *ppos;
2470 - struct mm_struct *mm;
2471 + struct mm_struct *mm = file->private_data;
2472
2473 - copied = -ESRCH;
2474 - if (!task)
2475 - goto out_no_task;
2476 + if (!mm)
2477 + return 0;
2478
2479 - copied = -ENOMEM;
2480 page = (char *)__get_free_page(GFP_TEMPORARY);
2481 if (!page)
2482 - goto out_task;
2483 -
2484 - mm = check_mem_permission(task);
2485 - copied = PTR_ERR(mm);
2486 - if (IS_ERR(mm))
2487 - goto out_free;
2488 -
2489 - copied = -EIO;
2490 - if (file->private_data != (void *)((long)current->self_exec_id))
2491 - goto out_mm;
2492 + return -ENOMEM;
2493
2494 copied = 0;
2495 while (count > 0) {
2496 @@ -933,13 +863,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
2497 }
2498 *ppos = dst;
2499
2500 -out_mm:
2501 - mmput(mm);
2502 -out_free:
2503 free_page((unsigned long) page);
2504 -out_task:
2505 - put_task_struct(task);
2506 -out_no_task:
2507 return copied;
2508 }
2509
2510 @@ -959,11 +883,20 @@ loff_t mem_lseek(struct file *file, loff_t offset, int orig)
2511 return file->f_pos;
2512 }
2513
2514 +static int mem_release(struct inode *inode, struct file *file)
2515 +{
2516 + struct mm_struct *mm = file->private_data;
2517 +
2518 + mmput(mm);
2519 + return 0;
2520 +}
2521 +
2522 static const struct file_operations proc_mem_operations = {
2523 .llseek = mem_lseek,
2524 .read = mem_read,
2525 .write = mem_write,
2526 .open = mem_open,
2527 + .release = mem_release,
2528 };
2529
2530 static ssize_t environ_read(struct file *file, char __user *buf,
2531 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
2532 index c7d4ee6..3487b06 100644
2533 --- a/fs/proc/task_mmu.c
2534 +++ b/fs/proc/task_mmu.c
2535 @@ -516,6 +516,9 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
2536 if (!page)
2537 continue;
2538
2539 + if (PageReserved(page))
2540 + continue;
2541 +
2542 /* Clear accessed and referenced bits. */
2543 ptep_test_and_clear_young(vma, addr, pte);
2544 ClearPageReferenced(page);
2545 diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
2546 index 766b1d4..29166ec 100644
2547 --- a/fs/proc/uptime.c
2548 +++ b/fs/proc/uptime.c
2549 @@ -11,15 +11,20 @@ static int uptime_proc_show(struct seq_file *m, void *v)
2550 {
2551 struct timespec uptime;
2552 struct timespec idle;
2553 + cputime64_t idletime;
2554 + u64 nsec;
2555 + u32 rem;
2556 int i;
2557 - cputime_t idletime = cputime_zero;
2558
2559 + idletime = 0;
2560 for_each_possible_cpu(i)
2561 idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
2562
2563 do_posix_clock_monotonic_gettime(&uptime);
2564 monotonic_to_bootbased(&uptime);
2565 - cputime_to_timespec(idletime, &idle);
2566 + nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
2567 + idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
2568 + idle.tv_nsec = rem;
2569 seq_printf(m, "%lu.%02lu %lu.%02lu\n",
2570 (unsigned long) uptime.tv_sec,
2571 (uptime.tv_nsec / (NSEC_PER_SEC / 100)),
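The uptime.c hunk above accumulates the per-CPU idle time in a 64-bit value and splits the resulting nanosecond total into seconds and nanoseconds with div_u64_rem() instead of relying on cputime_to_timespec(). A small standalone sketch of that conversion, assuming a nanoseconds-per-tick constant and a hypothetical timespec64 struct; illustrative only:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ull
#define TICK_NSEC    10000000ull     /* example: 100 Hz -> 10 ms per tick */

struct timespec64 { int64_t tv_sec; long tv_nsec; };

/* Convert an idle-time tick count into seconds + nanoseconds using 64-bit
 * arithmetic, the same shape as the div_u64_rem() based code above. */
static struct timespec64 idle_ticks_to_timespec(uint64_t idle_ticks)
{
	uint64_t nsec = idle_ticks * TICK_NSEC;
	struct timespec64 ts;

	ts.tv_sec  = (int64_t)(nsec / NSEC_PER_SEC);
	ts.tv_nsec = (long)(nsec % NSEC_PER_SEC);
	return ts;
}

int main(void)
{
	struct timespec64 idle = idle_ticks_to_timespec(123456789);

	printf("idle: %" PRId64 " s + %ld ns\n", idle.tv_sec, idle.tv_nsec);
	return 0;
}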
2572 diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
2573 index a811ac4..fd75b63 100644
2574 --- a/fs/ubifs/debug.h
2575 +++ b/fs/ubifs/debug.h
2576 @@ -121,20 +121,21 @@ const char *dbg_key_str1(const struct ubifs_info *c,
2577 const union ubifs_key *key);
2578
2579 /*
2580 - * DBGKEY macros require @dbg_lock to be held, which it is in the dbg message
2581 - * macros.
2582 + * TODO: these macros are now broken because there is no locking around them
2583 + * and we use a global buffer for the key string. This means that in case of
2584 + * concurrent execution we will end up with incorrect and messy key strings.
2585 */
2586 #define DBGKEY(key) dbg_key_str0(c, (key))
2587 #define DBGKEY1(key) dbg_key_str1(c, (key))
2588
2589 -#define ubifs_dbg_msg(type, fmt, ...) do { \
2590 - spin_lock(&dbg_lock); \
2591 - pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__); \
2592 - spin_unlock(&dbg_lock); \
2593 -} while (0)
2594 +#define ubifs_dbg_msg(type, fmt, ...) \
2595 + pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__)
2596
2597 /* Just a debugging messages not related to any specific UBIFS subsystem */
2598 -#define dbg_msg(fmt, ...) ubifs_dbg_msg("msg", fmt, ##__VA_ARGS__)
2599 +#define dbg_msg(fmt, ...) \
2600 + printk(KERN_DEBUG "UBIFS DBG (pid %d): %s: " fmt "\n", current->pid, \
2601 + __func__, ##__VA_ARGS__)
2602 +
2603 /* General messages */
2604 #define dbg_gen(fmt, ...) ubifs_dbg_msg("gen", fmt, ##__VA_ARGS__)
2605 /* Additional journal messages */
2606 diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
2607 index 1739726..451823c 100644
2608 --- a/include/acpi/acpi_numa.h
2609 +++ b/include/acpi/acpi_numa.h
2610 @@ -15,6 +15,7 @@ extern int pxm_to_node(int);
2611 extern int node_to_pxm(int);
2612 extern void __acpi_map_pxm_to_node(int, int);
2613 extern int acpi_map_pxm_to_node(int);
2614 +extern unsigned char acpi_srat_revision;
2615
2616 #endif /* CONFIG_ACPI_NUMA */
2617 #endif /* __ACP_NUMA_H */
2618 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
2619 index cd93f99..1b13021 100644
2620 --- a/include/linux/blkdev.h
2621 +++ b/include/linux/blkdev.h
2622 @@ -670,6 +670,9 @@ extern int blk_insert_cloned_request(struct request_queue *q,
2623 struct request *rq);
2624 extern void blk_delay_queue(struct request_queue *, unsigned long);
2625 extern void blk_recount_segments(struct request_queue *, struct bio *);
2626 +extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
2627 +extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
2628 + unsigned int, void __user *);
2629 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
2630 unsigned int, void __user *);
2631 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
2632 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
2633 index 8f848e4..f13bb6d 100644
2634 --- a/include/linux/dcache.h
2635 +++ b/include/linux/dcache.h
2636 @@ -207,6 +207,7 @@ struct dentry_operations {
2637
2638 #define DCACHE_CANT_MOUNT 0x0100
2639 #define DCACHE_GENOCIDE 0x0200
2640 +#define DCACHE_SHRINK_LIST 0x0400
2641
2642 #define DCACHE_OP_HASH 0x1000
2643 #define DCACHE_OP_COMPARE 0x2000
2644 diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
2645 index 50940da..313a00e 100644
2646 --- a/include/linux/memcontrol.h
2647 +++ b/include/linux/memcontrol.h
2648 @@ -119,6 +119,8 @@ struct zone_reclaim_stat*
2649 mem_cgroup_get_reclaim_stat_from_page(struct page *page);
2650 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
2651 struct task_struct *p);
2652 +extern void mem_cgroup_replace_page_cache(struct page *oldpage,
2653 + struct page *newpage);
2654
2655 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2656 extern int do_swap_account;
2657 @@ -370,6 +372,10 @@ static inline
2658 void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
2659 {
2660 }
2661 +static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
2662 + struct page *newpage)
2663 +{
2664 +}
2665 #endif /* CONFIG_CGROUP_MEM_CONT */
2666
2667 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
2668 diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
2669 index e884096..dad7d9a 100644
2670 --- a/include/linux/pci_regs.h
2671 +++ b/include/linux/pci_regs.h
2672 @@ -392,7 +392,7 @@
2673 #define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
2674 #define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
2675 #define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
2676 -#define PCI_EXP_TYPE_RC_EC 0x10 /* Root Complex Event Collector */
2677 +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
2678 #define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
2679 #define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
2680 #define PCI_EXP_DEVCAP 4 /* Device capabilities */
2681 diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
2682 index 85c50b4..c84e974 100644
2683 --- a/include/linux/sunrpc/svcsock.h
2684 +++ b/include/linux/sunrpc/svcsock.h
2685 @@ -34,7 +34,7 @@ struct svc_sock {
2686 /*
2687 * Function prototypes.
2688 */
2689 -void svc_close_all(struct list_head *);
2690 +void svc_close_all(struct svc_serv *);
2691 int svc_recv(struct svc_rqst *, long);
2692 int svc_send(struct svc_rqst *);
2693 void svc_drop(struct svc_rqst *);
2694 diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
2695 index 8a4c309..eeeda13 100644
2696 --- a/include/linux/videodev2.h
2697 +++ b/include/linux/videodev2.h
2698 @@ -1075,6 +1075,7 @@ struct v4l2_querymenu {
2699 #define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
2700
2701 /* User-class control IDs defined by V4L2 */
2702 +#define V4L2_CID_MAX_CTRLS 1024
2703 #define V4L2_CID_BASE (V4L2_CTRL_CLASS_USER | 0x900)
2704 #define V4L2_CID_USER_BASE V4L2_CID_BASE
2705 /* IDs reserved for driver specific controls */
2706 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
2707 index 561ac99..0fe6679 100644
2708 --- a/include/target/target_core_base.h
2709 +++ b/include/target/target_core_base.h
2710 @@ -36,6 +36,7 @@
2711 #define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE
2712 /* Used by transport_send_check_condition_and_sense() */
2713 #define SPC_SENSE_KEY_OFFSET 2
2714 +#define SPC_ADD_SENSE_LEN_OFFSET 7
2715 #define SPC_ASC_KEY_OFFSET 12
2716 #define SPC_ASCQ_KEY_OFFSET 13
2717 #define TRANSPORT_IQN_LEN 224
2718 diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
2719 index 99fcffb..454ee26 100644
2720 --- a/include/xen/interface/io/xs_wire.h
2721 +++ b/include/xen/interface/io/xs_wire.h
2722 @@ -84,4 +84,7 @@ struct xenstore_domain_interface {
2723 XENSTORE_RING_IDX rsp_cons, rsp_prod;
2724 };
2725
2726 +/* Violating this is very bad. See docs/misc/xenstore.txt. */
2727 +#define XENSTORE_PAYLOAD_MAX 4096
2728 +
2729 #endif /* _XS_WIRE_H */
2730 diff --git a/init/do_mounts.c b/init/do_mounts.c
2731 index c0851a8..ef6478f 100644
2732 --- a/init/do_mounts.c
2733 +++ b/init/do_mounts.c
2734 @@ -360,15 +360,42 @@ out:
2735 }
2736
2737 #ifdef CONFIG_ROOT_NFS
2738 +
2739 +#define NFSROOT_TIMEOUT_MIN 5
2740 +#define NFSROOT_TIMEOUT_MAX 30
2741 +#define NFSROOT_RETRY_MAX 5
2742 +
2743 static int __init mount_nfs_root(void)
2744 {
2745 char *root_dev, *root_data;
2746 + unsigned int timeout;
2747 + int try, err;
2748
2749 - if (nfs_root_data(&root_dev, &root_data) != 0)
2750 - return 0;
2751 - if (do_mount_root(root_dev, "nfs", root_mountflags, root_data) != 0)
2752 + err = nfs_root_data(&root_dev, &root_data);
2753 + if (err != 0)
2754 return 0;
2755 - return 1;
2756 +
2757 + /*
2758 + * The server or network may not be ready, so try several
2759 + * times. Stop after a few tries in case the client wants
2760 + * to fall back to other boot methods.
2761 + */
2762 + timeout = NFSROOT_TIMEOUT_MIN;
2763 + for (try = 1; ; try++) {
2764 + err = do_mount_root(root_dev, "nfs",
2765 + root_mountflags, root_data);
2766 + if (err == 0)
2767 + return 1;
2768 + if (try > NFSROOT_RETRY_MAX)
2769 + break;
2770 +
2771 + /* Wait, in case the server refused us immediately */
2772 + ssleep(timeout);
2773 + timeout <<= 1;
2774 + if (timeout > NFSROOT_TIMEOUT_MAX)
2775 + timeout = NFSROOT_TIMEOUT_MAX;
2776 + }
2777 + return 0;
2778 }
2779 #endif
2780
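The do_mounts.c change above retries the NFS root mount with a capped, doubling delay instead of giving up after one attempt, since the server or network may not yet be ready when the kernel first tries. A minimal standalone sketch of the same bounded exponential backoff loop, with a stubbed-in mount attempt; illustrative only:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define TIMEOUT_MIN 5     /* seconds, mirrors NFSROOT_TIMEOUT_MIN */
#define TIMEOUT_MAX 30    /* seconds, mirrors NFSROOT_TIMEOUT_MAX */
#define RETRY_MAX   5     /* mirrors NFSROOT_RETRY_MAX */

/* Stand-in for do_mount_root(); pretend the server answers on the 3rd try. */
static int try_mount(int attempt)
{
	return attempt >= 3 ? 0 : -1;
}

static bool mount_with_backoff(void)
{
	unsigned int timeout = TIMEOUT_MIN;
	int try;

	for (try = 1; ; try++) {
		if (try_mount(try) == 0)
			return true;            /* mounted */
		if (try > RETRY_MAX)
			break;                  /* let other boot methods run */

		sleep(timeout);                 /* wait before retrying */
		timeout <<= 1;                  /* double the delay ... */
		if (timeout > TIMEOUT_MAX)
			timeout = TIMEOUT_MAX;  /* ... but cap it */
	}
	return false;
}

int main(void)
{
	printf("mount %s\n", mount_with_backoff() ? "succeeded" : "failed");
	return 0;
}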
2781 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
2782 index 7798181..e0f0bdd 100644
2783 --- a/kernel/kprobes.c
2784 +++ b/kernel/kprobes.c
2785 @@ -1077,6 +1077,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
2786 /* Early boot. kretprobe_table_locks not yet initialized. */
2787 return;
2788
2789 + INIT_HLIST_HEAD(&empty_rp);
2790 hash = hash_ptr(tk, KPROBE_HASH_BITS);
2791 head = &kretprobe_inst_table[hash];
2792 kretprobe_table_lock(hash, &flags);
2793 @@ -1085,7 +1086,6 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
2794 recycle_rp_inst(ri, &empty_rp);
2795 }
2796 kretprobe_table_unlock(hash, &flags);
2797 - INIT_HLIST_HEAD(&empty_rp);
2798 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
2799 hlist_del(&ri->hlist);
2800 kfree(ri);
2801 diff --git a/mm/filemap.c b/mm/filemap.c
2802 index dd828ea..3c981ba 100644
2803 --- a/mm/filemap.c
2804 +++ b/mm/filemap.c
2805 @@ -396,24 +396,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_range);
2806 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
2807 {
2808 int error;
2809 - struct mem_cgroup *memcg = NULL;
2810
2811 VM_BUG_ON(!PageLocked(old));
2812 VM_BUG_ON(!PageLocked(new));
2813 VM_BUG_ON(new->mapping);
2814
2815 - /*
2816 - * This is not page migration, but prepare_migration and
2817 - * end_migration does enough work for charge replacement.
2818 - *
2819 - * In the longer term we probably want a specialized function
2820 - * for moving the charge from old to new in a more efficient
2821 - * manner.
2822 - */
2823 - error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
2824 - if (error)
2825 - return error;
2826 -
2827 error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
2828 if (!error) {
2829 struct address_space *mapping = old->mapping;
2830 @@ -435,13 +422,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
2831 if (PageSwapBacked(new))
2832 __inc_zone_page_state(new, NR_SHMEM);
2833 spin_unlock_irq(&mapping->tree_lock);
2834 + /* mem_cgroup codes must not be called under tree_lock */
2835 + mem_cgroup_replace_page_cache(old, new);
2836 radix_tree_preload_end();
2837 if (freepage)
2838 freepage(old);
2839 page_cache_release(old);
2840 - mem_cgroup_end_migration(memcg, old, new, true);
2841 - } else {
2842 - mem_cgroup_end_migration(memcg, old, new, false);
2843 }
2844
2845 return error;
2846 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2847 index d99217b..3791581 100644
2848 --- a/mm/memcontrol.c
2849 +++ b/mm/memcontrol.c
2850 @@ -3422,6 +3422,50 @@ int mem_cgroup_shmem_charge_fallback(struct page *page,
2851 return ret;
2852 }
2853
2854 +/*
2855 + * At replace page cache, newpage is not under any memcg but it's on
2856 + * LRU. So, this function doesn't touch res_counter but handles LRU
2857 + * in the correct way. Both pages are locked so we cannot race with uncharge.
2858 + */
2859 +void mem_cgroup_replace_page_cache(struct page *oldpage,
2860 + struct page *newpage)
2861 +{
2862 + struct mem_cgroup *memcg;
2863 + struct page_cgroup *pc;
2864 + struct zone *zone;
2865 + enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
2866 + unsigned long flags;
2867 +
2868 + if (mem_cgroup_disabled())
2869 + return;
2870 +
2871 + pc = lookup_page_cgroup(oldpage);
2872 + /* fix accounting on old pages */
2873 + lock_page_cgroup(pc);
2874 + memcg = pc->mem_cgroup;
2875 + mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
2876 + ClearPageCgroupUsed(pc);
2877 + unlock_page_cgroup(pc);
2878 +
2879 + if (PageSwapBacked(oldpage))
2880 + type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2881 +
2882 + zone = page_zone(newpage);
2883 + pc = lookup_page_cgroup(newpage);
2884 + /*
2885 + * Even if newpage->mapping was NULL before starting replacement,
2886 + * the newpage may be on LRU(or pagevec for LRU) already. We lock
2887 + * LRU while we overwrite pc->mem_cgroup.
2888 + */
2889 + spin_lock_irqsave(&zone->lru_lock, flags);
2890 + if (PageLRU(newpage))
2891 + del_page_from_lru_list(zone, newpage, page_lru(newpage));
2892 + __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
2893 + if (PageLRU(newpage))
2894 + add_page_to_lru_list(zone, newpage, page_lru(newpage));
2895 + spin_unlock_irqrestore(&zone->lru_lock, flags);
2896 +}
2897 +
2898 #ifdef CONFIG_DEBUG_VM
2899 static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
2900 {
2901 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2902 index 8439d2a..947a7e9 100644
2903 --- a/mm/page_alloc.c
2904 +++ b/mm/page_alloc.c
2905 @@ -5565,6 +5565,17 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
2906 bool is_pageblock_removable_nolock(struct page *page)
2907 {
2908 struct zone *zone = page_zone(page);
2909 + unsigned long pfn = page_to_pfn(page);
2910 +
2911 + /*
2912 + * We have to be careful here because we are iterating over memory
2913 + * sections which are not zone aware so we might end up outside of
2914 + * the zone but still within the section.
2915 + */
2916 + if (!zone || zone->zone_start_pfn > pfn ||
2917 + zone->zone_start_pfn + zone->spanned_pages <= pfn)
2918 + return false;
2919 +
2920 return __count_immobile_pages(zone, page, 0);
2921 }
2922
2923 diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
2924 index c1f4154..c7056b2 100644
2925 --- a/net/ipv4/ah4.c
2926 +++ b/net/ipv4/ah4.c
2927 @@ -136,8 +136,6 @@ static void ah_output_done(struct crypto_async_request *base, int err)
2928 memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
2929 }
2930
2931 - err = ah->nexthdr;
2932 -
2933 kfree(AH_SKB_CB(skb)->tmp);
2934 xfrm_output_resume(skb, err);
2935 }
2936 @@ -264,12 +262,12 @@ static void ah_input_done(struct crypto_async_request *base, int err)
2937 if (err)
2938 goto out;
2939
2940 + err = ah->nexthdr;
2941 +
2942 skb->network_header += ah_hlen;
2943 memcpy(skb_network_header(skb), work_iph, ihl);
2944 __skb_pull(skb, ah_hlen + ihl);
2945 skb_set_transport_header(skb, -ihl);
2946 -
2947 - err = ah->nexthdr;
2948 out:
2949 kfree(AH_SKB_CB(skb)->tmp);
2950 xfrm_input_resume(skb, err);
2951 diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
2952 index 2195ae6..7a33aaa 100644
2953 --- a/net/ipv6/ah6.c
2954 +++ b/net/ipv6/ah6.c
2955 @@ -324,8 +324,6 @@ static void ah6_output_done(struct crypto_async_request *base, int err)
2956 #endif
2957 }
2958
2959 - err = ah->nexthdr;
2960 -
2961 kfree(AH_SKB_CB(skb)->tmp);
2962 xfrm_output_resume(skb, err);
2963 }
2964 @@ -466,12 +464,12 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
2965 if (err)
2966 goto out;
2967
2968 + err = ah->nexthdr;
2969 +
2970 skb->network_header += ah_hlen;
2971 memcpy(skb_network_header(skb), work_iph, hdr_len);
2972 __skb_pull(skb, ah_hlen + hdr_len);
2973 skb_set_transport_header(skb, -hdr_len);
2974 -
2975 - err = ah->nexthdr;
2976 out:
2977 kfree(AH_SKB_CB(skb)->tmp);
2978 xfrm_input_resume(skb, err);
2979 diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
2980 index 8f6a302..aa1c40a 100644
2981 --- a/net/mac80211/wpa.c
2982 +++ b/net/mac80211/wpa.c
2983 @@ -109,7 +109,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
2984 if (status->flag & RX_FLAG_MMIC_ERROR)
2985 goto mic_fail;
2986
2987 - if (!(status->flag & RX_FLAG_IV_STRIPPED))
2988 + if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
2989 goto update_iv;
2990
2991 return RX_CONTINUE;
2992 diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
2993 index 2b90292..ce5f111 100644
2994 --- a/net/sunrpc/svc.c
2995 +++ b/net/sunrpc/svc.c
2996 @@ -167,6 +167,7 @@ svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
2997
2998 fail_free:
2999 kfree(m->to_pool);
3000 + m->to_pool = NULL;
3001 fail:
3002 return -ENOMEM;
3003 }
3004 @@ -287,7 +288,9 @@ svc_pool_map_put(void)
3005 if (!--m->count) {
3006 m->mode = SVC_POOL_DEFAULT;
3007 kfree(m->to_pool);
3008 + m->to_pool = NULL;
3009 kfree(m->pool_to);
3010 + m->pool_to = NULL;
3011 m->npools = 0;
3012 }
3013
3014 @@ -472,17 +475,20 @@ svc_destroy(struct svc_serv *serv)
3015 printk("svc_destroy: no threads for serv=%p!\n", serv);
3016
3017 del_timer_sync(&serv->sv_temptimer);
3018 -
3019 - svc_close_all(&serv->sv_tempsocks);
3020 + /*
3021 + * The set of xprts (contained in the sv_tempsocks and
3022 + * sv_permsocks lists) is now constant, since it is modified
3023 + * only by accepting new sockets (done by service threads in
3024 + * svc_recv) or aging old ones (done by sv_temptimer), or
3025 + * configuration changes (excluded by whatever locking the
3026 + * caller is using--nfsd_mutex in the case of nfsd). So it's
3027 + * safe to traverse those lists and shut everything down:
3028 + */
3029 + svc_close_all(serv);
3030
3031 if (serv->sv_shutdown)
3032 serv->sv_shutdown(serv);
3033
3034 - svc_close_all(&serv->sv_permsocks);
3035 -
3036 - BUG_ON(!list_empty(&serv->sv_permsocks));
3037 - BUG_ON(!list_empty(&serv->sv_tempsocks));
3038 -
3039 cache_clean_deferred(serv);
3040
3041 if (svc_serv_is_pooled(serv))
3042 diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
3043 index bd31208..9d7ed0b 100644
3044 --- a/net/sunrpc/svc_xprt.c
3045 +++ b/net/sunrpc/svc_xprt.c
3046 @@ -901,14 +901,7 @@ void svc_delete_xprt(struct svc_xprt *xprt)
3047 spin_lock_bh(&serv->sv_lock);
3048 if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
3049 list_del_init(&xprt->xpt_list);
3050 - /*
3051 - * The only time we're called while xpt_ready is still on a list
3052 - * is while the list itself is about to be destroyed (in
3053 - * svc_destroy). BUT svc_xprt_enqueue could still be attempting
3054 - * to add new entries to the sp_sockets list, so we can't leave
3055 - * a freed xprt on it.
3056 - */
3057 - list_del_init(&xprt->xpt_ready);
3058 + BUG_ON(!list_empty(&xprt->xpt_ready));
3059 if (test_bit(XPT_TEMP, &xprt->xpt_flags))
3060 serv->sv_tmpcnt--;
3061 spin_unlock_bh(&serv->sv_lock);
3062 @@ -936,22 +929,48 @@ void svc_close_xprt(struct svc_xprt *xprt)
3063 }
3064 EXPORT_SYMBOL_GPL(svc_close_xprt);
3065
3066 -void svc_close_all(struct list_head *xprt_list)
3067 +static void svc_close_list(struct list_head *xprt_list)
3068 +{
3069 + struct svc_xprt *xprt;
3070 +
3071 + list_for_each_entry(xprt, xprt_list, xpt_list) {
3072 + set_bit(XPT_CLOSE, &xprt->xpt_flags);
3073 + set_bit(XPT_BUSY, &xprt->xpt_flags);
3074 + }
3075 +}
3076 +
3077 +void svc_close_all(struct svc_serv *serv)
3078 {
3079 + struct svc_pool *pool;
3080 struct svc_xprt *xprt;
3081 struct svc_xprt *tmp;
3082 + int i;
3083 +
3084 + svc_close_list(&serv->sv_tempsocks);
3085 + svc_close_list(&serv->sv_permsocks);
3086
3087 + for (i = 0; i < serv->sv_nrpools; i++) {
3088 + pool = &serv->sv_pools[i];
3089 +
3090 + spin_lock_bh(&pool->sp_lock);
3091 + while (!list_empty(&pool->sp_sockets)) {
3092 + xprt = list_first_entry(&pool->sp_sockets, struct svc_xprt, xpt_ready);
3093 + list_del_init(&xprt->xpt_ready);
3094 + }
3095 + spin_unlock_bh(&pool->sp_lock);
3096 + }
3097 /*
3098 - * The server is shutting down, and no more threads are running.
3099 - * svc_xprt_enqueue() might still be running, but at worst it
3100 - * will re-add the xprt to sp_sockets, which will soon get
3101 - * freed. So we don't bother with any more locking, and don't
3102 - * leave the close to the (nonexistent) server threads:
3103 + * At this point the sp_sockets lists will stay empty, since
3104 + * svc_enqueue will not add new entries without taking the
3105 + * sp_lock and checking XPT_BUSY.
3106 */
3107 - list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
3108 - set_bit(XPT_CLOSE, &xprt->xpt_flags);
3109 + list_for_each_entry_safe(xprt, tmp, &serv->sv_tempsocks, xpt_list)
3110 svc_delete_xprt(xprt);
3111 - }
3112 + list_for_each_entry_safe(xprt, tmp, &serv->sv_permsocks, xpt_list)
3113 + svc_delete_xprt(xprt);
3114 +
3115 + BUG_ON(!list_empty(&serv->sv_permsocks));
3116 + BUG_ON(!list_empty(&serv->sv_tempsocks));
3117 }
3118
3119 /*
3120 diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
3121 index a4fe923..25f1e71 100644
3122 --- a/scripts/kconfig/streamline_config.pl
3123 +++ b/scripts/kconfig/streamline_config.pl
3124 @@ -242,33 +242,61 @@ if ($kconfig) {
3125 read_kconfig($kconfig);
3126 }
3127
3128 +sub convert_vars {
3129 + my ($line, %vars) = @_;
3130 +
3131 + my $process = "";
3132 +
3133 + while ($line =~ s/^(.*?)(\$\((.*?)\))//) {
3134 + my $start = $1;
3135 + my $variable = $2;
3136 + my $var = $3;
3137 +
3138 + if (defined($vars{$var})) {
3139 + $process .= $start . $vars{$var};
3140 + } else {
3141 + $process .= $start . $variable;
3142 + }
3143 + }
3144 +
3145 + $process .= $line;
3146 +
3147 + return $process;
3148 +}
3149 +
3150 # Read all Makefiles to map the configs to the objects
3151 foreach my $makefile (@makefiles) {
3152
3153 - my $cont = 0;
3154 + my $line = "";
3155 + my %make_vars;
3156
3157 open(MIN,$makefile) || die "Can't open $makefile";
3158 while (<MIN>) {
3159 + # if this line ends with a backslash, continue
3160 + chomp;
3161 + if (/^(.*)\\$/) {
3162 + $line .= $1;
3163 + next;
3164 + }
3165 +
3166 + $line .= $_;
3167 + $_ = $line;
3168 + $line = "";
3169 +
3170 my $objs;
3171
3172 - # is this a line after a line with a backslash?
3173 - if ($cont && /(\S.*)$/) {
3174 - $objs = $1;
3175 - }
3176 - $cont = 0;
3177 + $_ = convert_vars($_, %make_vars);
3178
3179 # collect objects after obj-$(CONFIG_FOO_BAR)
3180 if (/obj-\$\((CONFIG_[^\)]*)\)\s*[+:]?=\s*(.*)/) {
3181 $var = $1;
3182 $objs = $2;
3183 +
3184 + # check if variables are set
3185 + } elsif (/^\s*(\S+)\s*[:]?=\s*(.*\S)/) {
3186 + $make_vars{$1} = $2;
3187 }
3188 if (defined($objs)) {
3189 - # test if the line ends with a backslash
3190 - if ($objs =~ m,(.*)\\$,) {
3191 - $objs = $1;
3192 - $cont = 1;
3193 - }
3194 -
3195 foreach my $obj (split /\s+/,$objs) {
3196 $obj =~ s/-/_/g;
3197 if ($obj =~ /(.*)\.o$/) {
3198 diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
3199 index f40a6af6..54e35c1 100644
3200 --- a/scripts/recordmcount.h
3201 +++ b/scripts/recordmcount.h
3202 @@ -462,7 +462,7 @@ __has_rel_mcount(Elf_Shdr const *const relhdr, /* is SHT_REL or SHT_RELA */
3203 succeed_file();
3204 }
3205 if (w(txthdr->sh_type) != SHT_PROGBITS ||
3206 - !(w(txthdr->sh_flags) & SHF_EXECINSTR))
3207 + !(_w(txthdr->sh_flags) & SHF_EXECINSTR))
3208 return NULL;
3209 return txtname;
3210 }
3211 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
3212 index da36d2c..5335605 100644
3213 --- a/security/integrity/ima/ima_api.c
3214 +++ b/security/integrity/ima/ima_api.c
3215 @@ -177,8 +177,8 @@ void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
3216 strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
3217
3218 result = ima_store_template(entry, violation, inode);
3219 - if (!result)
3220 + if (!result || result == -EEXIST)
3221 iint->flags |= IMA_MEASURED;
3222 - else
3223 + if (result < 0)
3224 kfree(entry);
3225 }
3226 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
3227 index 8e28f04..55a6271 100644
3228 --- a/security/integrity/ima/ima_queue.c
3229 +++ b/security/integrity/ima/ima_queue.c
3230 @@ -23,6 +23,8 @@
3231 #include <linux/slab.h>
3232 #include "ima.h"
3233
3234 +#define AUDIT_CAUSE_LEN_MAX 32
3235 +
3236 LIST_HEAD(ima_measurements); /* list of all measurements */
3237
3238 /* key: inode (before secure-hashing a file) */
3239 @@ -94,7 +96,8 @@ static int ima_pcr_extend(const u8 *hash)
3240
3241 result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash);
3242 if (result != 0)
3243 - pr_err("IMA: Error Communicating to TPM chip\n");
3244 + pr_err("IMA: Error Communicating to TPM chip, result: %d\n",
3245 + result);
3246 return result;
3247 }
3248
3249 @@ -106,14 +109,16 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
3250 {
3251 u8 digest[IMA_DIGEST_SIZE];
3252 const char *audit_cause = "hash_added";
3253 + char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
3254 int audit_info = 1;
3255 - int result = 0;
3256 + int result = 0, tpmresult = 0;
3257
3258 mutex_lock(&ima_extend_list_mutex);
3259 if (!violation) {
3260 memcpy(digest, entry->digest, sizeof digest);
3261 if (ima_lookup_digest_entry(digest)) {
3262 audit_cause = "hash_exists";
3263 + result = -EEXIST;
3264 goto out;
3265 }
3266 }
3267 @@ -128,9 +133,11 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
3268 if (violation) /* invalidate pcr */
3269 memset(digest, 0xff, sizeof digest);
3270
3271 - result = ima_pcr_extend(digest);
3272 - if (result != 0) {
3273 - audit_cause = "TPM error";
3274 + tpmresult = ima_pcr_extend(digest);
3275 + if (tpmresult != 0) {
3276 + snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
3277 + tpmresult);
3278 + audit_cause = tpm_audit_cause;
3279 audit_info = 0;
3280 }
3281 out:
3282 diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
3283 index 08ec073..e289a13 100644
3284 --- a/sound/pci/hda/hda_local.h
3285 +++ b/sound/pci/hda/hda_local.h
3286 @@ -474,7 +474,12 @@ static inline u32 get_wcaps(struct hda_codec *codec, hda_nid_t nid)
3287 }
3288
3289 /* get the widget type from widget capability bits */
3290 -#define get_wcaps_type(wcaps) (((wcaps) & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT)
3291 +static inline int get_wcaps_type(unsigned int wcaps)
3292 +{
3293 + if (!wcaps)
3294 + return -1; /* invalid type */
3295 + return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
3296 +}
3297
3298 static inline unsigned int get_wcaps_channels(u32 wcaps)
3299 {
3300 diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
3301 index bfe74c2..6fe944a 100644
3302 --- a/sound/pci/hda/hda_proc.c
3303 +++ b/sound/pci/hda/hda_proc.c
3304 @@ -54,6 +54,8 @@ static const char *get_wid_type_name(unsigned int wid_value)
3305 [AC_WID_BEEP] = "Beep Generator Widget",
3306 [AC_WID_VENDOR] = "Vendor Defined Widget",
3307 };
3308 + if (wid_value == -1)
3309 + return "UNKNOWN Widget";
3310 wid_value &= 0xf;
3311 if (names[wid_value])
3312 return names[wid_value];
3313 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
3314 index 5d2e97a..0d8db75 100644
3315 --- a/sound/pci/hda/patch_sigmatel.c
3316 +++ b/sound/pci/hda/patch_sigmatel.c
3317 @@ -1602,7 +1602,7 @@ static const struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
3318 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
3319 "Dell Studio 1557", STAC_DELL_M6_DMIC),
3320 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
3321 - "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
3322 + "Dell Studio XPS 1645", STAC_DELL_M6_DMIC),
3323 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
3324 "Dell Studio 1558", STAC_DELL_M6_DMIC),
3325 {} /* terminator */
3326 diff --git a/sound/pci/ice1712/amp.c b/sound/pci/ice1712/amp.c
3327 index e328cfb..e525da2 100644
3328 --- a/sound/pci/ice1712/amp.c
3329 +++ b/sound/pci/ice1712/amp.c
3330 @@ -68,8 +68,11 @@ static int __devinit snd_vt1724_amp_init(struct snd_ice1712 *ice)
3331
3332 static int __devinit snd_vt1724_amp_add_controls(struct snd_ice1712 *ice)
3333 {
3334 - /* we use pins 39 and 41 of the VT1616 for left and right read outputs */
3335 - snd_ac97_write_cache(ice->ac97, 0x5a, snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
3336 + if (ice->ac97)
3337 + /* we use pins 39 and 41 of the VT1616 for left and right
3338 + read outputs */
3339 + snd_ac97_write_cache(ice->ac97, 0x5a,
3340 + snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
3341 return 0;
3342 }
3343
3344 diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c
3345 index 42d1ab1..915546a 100644
3346 --- a/sound/pci/oxygen/xonar_wm87x6.c
3347 +++ b/sound/pci/oxygen/xonar_wm87x6.c
3348 @@ -177,6 +177,7 @@ static void wm8776_registers_init(struct oxygen *chip)
3349 struct xonar_wm87x6 *data = chip->model_data;
3350
3351 wm8776_write(chip, WM8776_RESET, 0);
3352 + wm8776_write(chip, WM8776_PHASESWAP, WM8776_PH_MASK);
3353 wm8776_write(chip, WM8776_DACCTRL1, WM8776_DZCEN |
3354 WM8776_PL_LEFT_LEFT | WM8776_PL_RIGHT_RIGHT);
3355 wm8776_write(chip, WM8776_DACMUTE, chip->dac_mute ? WM8776_DMUTE : 0);
3356 diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
3357 index c400ade..1e7a47a 100644
3358 --- a/sound/usb/usx2y/usb_stream.c
3359 +++ b/sound/usb/usx2y/usb_stream.c
3360 @@ -674,7 +674,7 @@ dotry:
3361 inurb->transfer_buffer_length =
3362 inurb->number_of_packets *
3363 inurb->iso_frame_desc[0].length;
3364 - preempt_disable();
3365 +
3366 if (u == 0) {
3367 int now;
3368 struct usb_device *dev = inurb->dev;
3369 @@ -686,19 +686,17 @@ dotry:
3370 }
3371 err = usb_submit_urb(inurb, GFP_ATOMIC);
3372 if (err < 0) {
3373 - preempt_enable();
3374 snd_printk(KERN_ERR"usb_submit_urb(sk->inurb[%i])"
3375 " returned %i\n", u, err);
3376 return err;
3377 }
3378 err = usb_submit_urb(outurb, GFP_ATOMIC);
3379 if (err < 0) {
3380 - preempt_enable();
3381 snd_printk(KERN_ERR"usb_submit_urb(sk->outurb[%i])"
3382 " returned %i\n", u, err);
3383 return err;
3384 }
3385 - preempt_enable();
3386 +
3387 if (inurb->start_frame != outurb->start_frame) {
3388 snd_printd(KERN_DEBUG
3389 "u[%i] start_frames differ in:%u out:%u\n",
