/[linux-patches]/genpatches-2.6/tags/2.6.32-15/1005_linux-2.6.32.6.patch
Gentoo

Contents of /genpatches-2.6/tags/2.6.32-15/1005_linux-2.6.32.6.patch

Parent Directory | Revision Log


Revision 1735 - (show annotations) (download)
Wed Aug 4 11:25:09 2010 UTC (4 years, 8 months ago) by mpagano
File size: 42826 byte(s)
2.6.32-15 release
1 diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
2 index 6a52d4b..f8590c5 100644
3 --- a/arch/x86/kernel/cpuid.c
4 +++ b/arch/x86/kernel/cpuid.c
5 @@ -192,7 +192,8 @@ static int __init cpuid_init(void)
6 int i, err = 0;
7 i = 0;
8
9 - if (register_chrdev(CPUID_MAJOR, "cpu/cpuid", &cpuid_fops)) {
10 + if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS,
11 + "cpu/cpuid", &cpuid_fops)) {
12 printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n",
13 CPUID_MAJOR);
14 err = -EBUSY;
15 @@ -221,7 +222,7 @@ out_class:
16 }
17 class_destroy(cpuid_class);
18 out_chrdev:
19 - unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
20 + __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
21 out:
22 return err;
23 }
24 diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
25 index 6a3cefc..b42e63b 100644
26 --- a/arch/x86/kernel/msr.c
27 +++ b/arch/x86/kernel/msr.c
28 @@ -251,7 +251,7 @@ static int __init msr_init(void)
29 int i, err = 0;
30 i = 0;
31
32 - if (register_chrdev(MSR_MAJOR, "cpu/msr", &msr_fops)) {
33 + if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) {
34 printk(KERN_ERR "msr: unable to get major %d for msr\n",
35 MSR_MAJOR);
36 err = -EBUSY;
37 @@ -279,7 +279,7 @@ out_class:
38 msr_device_destroy(i);
39 class_destroy(msr_class);
40 out_chrdev:
41 - unregister_chrdev(MSR_MAJOR, "cpu/msr");
42 + __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
43 out:
44 return err;
45 }
46 diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
47 index b22d13b..a672f12 100644
48 --- a/arch/x86/pci/i386.c
49 +++ b/arch/x86/pci/i386.c
50 @@ -282,6 +282,15 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
51 return -EINVAL;
52
53 prot = pgprot_val(vma->vm_page_prot);
54 +
55 + /*
56 + * Return error if pat is not enabled and write_combine is requested.
57 + * Caller can followup with UC MINUS request and add a WC mtrr if there
58 + * is a free mtrr slot.
59 + */
60 + if (!pat_enabled && write_combine)
61 + return -EINVAL;
62 +
63 if (pat_enabled && write_combine)
64 prot |= _PAGE_CACHE_WC;
65 else if (pat_enabled || boot_cpu_data.x86 > 3)
66 diff --git a/block/blk-settings.c b/block/blk-settings.c
67 index 66d4aa8..d5aa886 100644
68 --- a/block/blk-settings.c
69 +++ b/block/blk-settings.c
70 @@ -560,6 +560,28 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
71 EXPORT_SYMBOL(blk_stack_limits);
72
73 /**
74 + * bdev_stack_limits - adjust queue limits for stacked drivers
75 + * @t: the stacking driver limits (top device)
76 + * @bdev: the component block_device (bottom)
77 + * @start: first data sector within component device
78 + *
79 + * Description:
80 + * Merges queue limits for a top device and a block_device. Returns
81 + * 0 if alignment didn't change. Returns -1 if adding the bottom
82 + * device caused misalignment.
83 + */
84 +int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
85 + sector_t start)
86 +{
87 + struct request_queue *bq = bdev_get_queue(bdev);
88 +
89 + start += get_start_sect(bdev);
90 +
91 + return blk_stack_limits(t, &bq->limits, start << 9);
92 +}
93 +EXPORT_SYMBOL(bdev_stack_limits);
94 +
95 +/**
96 * disk_stack_limits - adjust queue limits for stacked drivers
97 * @disk: MD/DM gendisk (top)
98 * @bdev: the underlying block device (bottom)
99 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
100 index 7511029..f1670e0 100644
101 --- a/drivers/acpi/ec.c
102 +++ b/drivers/acpi/ec.c
103 @@ -201,14 +201,13 @@ unlock:
104 spin_unlock_irqrestore(&ec->curr_lock, flags);
105 }
106
107 -static void acpi_ec_gpe_query(void *ec_cxt);
108 +static int acpi_ec_sync_query(struct acpi_ec *ec);
109
110 -static int ec_check_sci(struct acpi_ec *ec, u8 state)
111 +static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
112 {
113 if (state & ACPI_EC_FLAG_SCI) {
114 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
115 - return acpi_os_execute(OSL_EC_BURST_HANDLER,
116 - acpi_ec_gpe_query, ec);
117 + return acpi_ec_sync_query(ec);
118 }
119 return 0;
120 }
121 @@ -249,11 +248,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
122 {
123 unsigned long tmp;
124 int ret = 0;
125 - pr_debug(PREFIX "transaction start\n");
126 - /* disable GPE during transaction if storm is detected */
127 - if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
128 - acpi_disable_gpe(NULL, ec->gpe);
129 - }
130 if (EC_FLAGS_MSI)
131 udelay(ACPI_EC_MSI_UDELAY);
132 /* start transaction */
133 @@ -265,20 +259,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
134 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
135 spin_unlock_irqrestore(&ec->curr_lock, tmp);
136 ret = ec_poll(ec);
137 - pr_debug(PREFIX "transaction end\n");
138 spin_lock_irqsave(&ec->curr_lock, tmp);
139 ec->curr = NULL;
140 spin_unlock_irqrestore(&ec->curr_lock, tmp);
141 - if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
142 - /* check if we received SCI during transaction */
143 - ec_check_sci(ec, acpi_ec_read_status(ec));
144 - /* it is safe to enable GPE outside of transaction */
145 - acpi_enable_gpe(NULL, ec->gpe);
146 - } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
147 - pr_info(PREFIX "GPE storm detected, "
148 - "transactions will use polling mode\n");
149 - set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
150 - }
151 return ret;
152 }
153
154 @@ -321,7 +304,26 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
155 status = -ETIME;
156 goto end;
157 }
158 + pr_debug(PREFIX "transaction start\n");
159 + /* disable GPE during transaction if storm is detected */
160 + if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
161 + acpi_disable_gpe(NULL, ec->gpe);
162 + }
163 +
164 status = acpi_ec_transaction_unlocked(ec, t);
165 +
166 + /* check if we received SCI during transaction */
167 + ec_check_sci_sync(ec, acpi_ec_read_status(ec));
168 + if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
169 + msleep(1);
170 + /* it is safe to enable GPE outside of transaction */
171 + acpi_enable_gpe(NULL, ec->gpe);
172 + } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
173 + pr_info(PREFIX "GPE storm detected, "
174 + "transactions will use polling mode\n");
175 + set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
176 + }
177 + pr_debug(PREFIX "transaction end\n");
178 end:
179 if (ec->global_lock)
180 acpi_release_global_lock(glk);
181 @@ -443,7 +445,7 @@ int ec_transaction(u8 command,
182
183 EXPORT_SYMBOL(ec_transaction);
184
185 -static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
186 +static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data)
187 {
188 int result;
189 u8 d;
190 @@ -452,20 +454,16 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
191 .wlen = 0, .rlen = 1};
192 if (!ec || !data)
193 return -EINVAL;
194 -
195 /*
196 * Query the EC to find out which _Qxx method we need to evaluate.
197 * Note that successful completion of the query causes the ACPI_EC_SCI
198 * bit to be cleared (and thus clearing the interrupt source).
199 */
200 -
201 - result = acpi_ec_transaction(ec, &t);
202 + result = acpi_ec_transaction_unlocked(ec, &t);
203 if (result)
204 return result;
205 -
206 if (!d)
207 return -ENODATA;
208 -
209 *data = d;
210 return 0;
211 }
212 @@ -509,43 +507,78 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
213
214 EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
215
216 -static void acpi_ec_gpe_query(void *ec_cxt)
217 +static void acpi_ec_run(void *cxt)
218 {
219 - struct acpi_ec *ec = ec_cxt;
220 - u8 value = 0;
221 - struct acpi_ec_query_handler *handler, copy;
222 -
223 - if (!ec || acpi_ec_query(ec, &value))
224 + struct acpi_ec_query_handler *handler = cxt;
225 + if (!handler)
226 return;
227 - mutex_lock(&ec->lock);
228 + pr_debug(PREFIX "start query execution\n");
229 + if (handler->func)
230 + handler->func(handler->data);
231 + else if (handler->handle)
232 + acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
233 + pr_debug(PREFIX "stop query execution\n");
234 + kfree(handler);
235 +}
236 +
237 +static int acpi_ec_sync_query(struct acpi_ec *ec)
238 +{
239 + u8 value = 0;
240 + int status;
241 + struct acpi_ec_query_handler *handler, *copy;
242 + if ((status = acpi_ec_query_unlocked(ec, &value)))
243 + return status;
244 list_for_each_entry(handler, &ec->list, node) {
245 if (value == handler->query_bit) {
246 /* have custom handler for this bit */
247 - memcpy(&copy, handler, sizeof(copy));
248 - mutex_unlock(&ec->lock);
249 - if (copy.func) {
250 - copy.func(copy.data);
251 - } else if (copy.handle) {
252 - acpi_evaluate_object(copy.handle, NULL, NULL, NULL);
253 - }
254 - return;
255 + copy = kmalloc(sizeof(*handler), GFP_KERNEL);
256 + if (!copy)
257 + return -ENOMEM;
258 + memcpy(copy, handler, sizeof(*copy));
259 + pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value);
260 + return acpi_os_execute(OSL_GPE_HANDLER,
261 + acpi_ec_run, copy);
262 }
263 }
264 + return 0;
265 +}
266 +
267 +static void acpi_ec_gpe_query(void *ec_cxt)
268 +{
269 + struct acpi_ec *ec = ec_cxt;
270 + if (!ec)
271 + return;
272 + mutex_lock(&ec->lock);
273 + acpi_ec_sync_query(ec);
274 mutex_unlock(&ec->lock);
275 }
276
277 +static void acpi_ec_gpe_query(void *ec_cxt);
278 +
279 +static int ec_check_sci(struct acpi_ec *ec, u8 state)
280 +{
281 + if (state & ACPI_EC_FLAG_SCI) {
282 + if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
283 + pr_debug(PREFIX "push gpe query to the queue\n");
284 + return acpi_os_execute(OSL_NOTIFY_HANDLER,
285 + acpi_ec_gpe_query, ec);
286 + }
287 + }
288 + return 0;
289 +}
290 +
291 static u32 acpi_ec_gpe_handler(void *data)
292 {
293 struct acpi_ec *ec = data;
294 - u8 status;
295
296 pr_debug(PREFIX "~~~> interrupt\n");
297 - status = acpi_ec_read_status(ec);
298
299 - advance_transaction(ec, status);
300 - if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0)
301 + advance_transaction(ec, acpi_ec_read_status(ec));
302 + if (ec_transaction_done(ec) &&
303 + (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
304 wake_up(&ec->wait);
305 - ec_check_sci(ec, status);
306 + ec_check_sci(ec, acpi_ec_read_status(ec));
307 + }
308 return ACPI_INTERRUPT_HANDLED;
309 }
310
311 diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
312 index d3400b2..dc52f75 100644
313 --- a/drivers/char/nozomi.c
314 +++ b/drivers/char/nozomi.c
315 @@ -1629,10 +1629,10 @@ static void ntty_close(struct tty_struct *tty, struct file *file)
316
317 dc->open_ttys--;
318 port->count--;
319 - tty_port_tty_set(port, NULL);
320
321 if (port->count == 0) {
322 DBG1("close: %d", nport->token_dl);
323 + tty_port_tty_set(port, NULL);
324 spin_lock_irqsave(&dc->spin_mutex, flags);
325 dc->last_ier &= ~(nport->token_dl);
326 writew(dc->last_ier, dc->reg_ier);
327 diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
328 index 59499ee..e919bd9 100644
329 --- a/drivers/char/tty_io.c
330 +++ b/drivers/char/tty_io.c
331 @@ -1930,8 +1930,8 @@ static int tty_fasync(int fd, struct file *filp, int on)
332 pid = task_pid(current);
333 type = PIDTYPE_PID;
334 }
335 - spin_unlock_irqrestore(&tty->ctrl_lock, flags);
336 retval = __f_setown(filp, pid, type, 0);
337 + spin_unlock_irqrestore(&tty->ctrl_lock, flags);
338 if (retval)
339 goto out;
340 } else {
341 diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
342 index 083bec2..29e21d3 100644
343 --- a/drivers/gpu/drm/i915/intel_sdvo.c
344 +++ b/drivers/gpu/drm/i915/intel_sdvo.c
345 @@ -472,14 +472,63 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
346 }
347
348 /**
349 - * Don't check status code from this as it switches the bus back to the
350 - * SDVO chips which defeats the purpose of doing a bus switch in the first
351 - * place.
352 + * Try to read the response after issuie the DDC switch command. But it
353 + * is noted that we must do the action of reading response and issuing DDC
354 + * switch command in one I2C transaction. Otherwise when we try to start
355 + * another I2C transaction after issuing the DDC bus switch, it will be
356 + * switched to the internal SDVO register.
357 */
358 static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
359 u8 target)
360 {
361 - intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1);
362 + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
363 + u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
364 + struct i2c_msg msgs[] = {
365 + {
366 + .addr = sdvo_priv->slave_addr >> 1,
367 + .flags = 0,
368 + .len = 2,
369 + .buf = out_buf,
370 + },
371 + /* the following two are to read the response */
372 + {
373 + .addr = sdvo_priv->slave_addr >> 1,
374 + .flags = 0,
375 + .len = 1,
376 + .buf = cmd_buf,
377 + },
378 + {
379 + .addr = sdvo_priv->slave_addr >> 1,
380 + .flags = I2C_M_RD,
381 + .len = 1,
382 + .buf = ret_value,
383 + },
384 + };
385 +
386 + intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
387 + &target, 1);
388 + /* write the DDC switch command argument */
389 + intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target);
390 +
391 + out_buf[0] = SDVO_I2C_OPCODE;
392 + out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
393 + cmd_buf[0] = SDVO_I2C_CMD_STATUS;
394 + cmd_buf[1] = 0;
395 + ret_value[0] = 0;
396 + ret_value[1] = 0;
397 +
398 + ret = i2c_transfer(intel_output->i2c_bus, msgs, 3);
399 + if (ret != 3) {
400 + /* failure in I2C transfer */
401 + DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
402 + return;
403 + }
404 + if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
405 + DRM_DEBUG_KMS("DDC switch command returns response %d\n",
406 + ret_value[0]);
407 + return;
408 + }
409 + return;
410 }
411
412 static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
413 @@ -1589,6 +1638,32 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
414 edid = drm_get_edid(&intel_output->base,
415 intel_output->ddc_bus);
416
417 + /* This is only applied to SDVO cards with multiple outputs */
418 + if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) {
419 + uint8_t saved_ddc, temp_ddc;
420 + saved_ddc = sdvo_priv->ddc_bus;
421 + temp_ddc = sdvo_priv->ddc_bus >> 1;
422 + /*
423 + * Don't use the 1 as the argument of DDC bus switch to get
424 + * the EDID. It is used for SDVO SPD ROM.
425 + */
426 + while(temp_ddc > 1) {
427 + sdvo_priv->ddc_bus = temp_ddc;
428 + edid = drm_get_edid(&intel_output->base,
429 + intel_output->ddc_bus);
430 + if (edid) {
431 + /*
432 + * When we can get the EDID, maybe it is the
433 + * correct DDC bus. Update it.
434 + */
435 + sdvo_priv->ddc_bus = temp_ddc;
436 + break;
437 + }
438 + temp_ddc >>= 1;
439 + }
440 + if (edid == NULL)
441 + sdvo_priv->ddc_bus = saved_ddc;
442 + }
443 /* when there is no edid and no monitor is connected with VGA
444 * port, try to use the CRT ddc to read the EDID for DVI-connector
445 */
446 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
447 index 1a6cb3c..e869128 100644
448 --- a/drivers/md/dm-table.c
449 +++ b/drivers/md/dm-table.c
450 @@ -499,16 +499,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
451 return 0;
452 }
453
454 - if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
455 - DMWARN("%s: target device %s is misaligned: "
456 + if (bdev_stack_limits(limits, bdev, start) < 0)
457 + DMWARN("%s: adding target device %s caused an alignment inconsistency: "
458 "physical_block_size=%u, logical_block_size=%u, "
459 "alignment_offset=%u, start=%llu",
460 dm_device_name(ti->table->md), bdevname(bdev, b),
461 q->limits.physical_block_size,
462 q->limits.logical_block_size,
463 q->limits.alignment_offset,
464 - (unsigned long long) start << 9);
465 -
466 + (unsigned long long) start << SECTOR_SHIFT);
467
468 /*
469 * Check if merge fn is supported.
470 @@ -1025,9 +1024,9 @@ combine_limits:
471 * for the table.
472 */
473 if (blk_stack_limits(limits, &ti_limits, 0) < 0)
474 - DMWARN("%s: target device "
475 + DMWARN("%s: adding target device "
476 "(start sect %llu len %llu) "
477 - "is misaligned",
478 + "caused an alignment inconsistency",
479 dm_device_name(table->md),
480 (unsigned long long) ti->begin,
481 (unsigned long long) ti->len);
482 @@ -1079,15 +1078,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
483 struct queue_limits *limits)
484 {
485 /*
486 - * Each target device in the table has a data area that should normally
487 - * be aligned such that the DM device's alignment_offset is 0.
488 - * FIXME: Propagate alignment_offsets up the stack and warn of
489 - * sub-optimal or inconsistent settings.
490 - */
491 - limits->alignment_offset = 0;
492 - limits->misaligned = 0;
493 -
494 - /*
495 * Copy table's limits to the DM device's request_queue
496 */
497 q->limits = *limits;
498 diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
499 index aa8f995..28b4625 100644
500 --- a/drivers/media/video/gspca/sunplus.c
501 +++ b/drivers/media/video/gspca/sunplus.c
502 @@ -705,7 +705,7 @@ static void spca504B_SetSizeType(struct gspca_dev *gspca_dev)
503 rc = spca504B_PollingDataReady(gspca_dev);
504
505 /* Init the cam width height with some values get on init ? */
506 - reg_w_riv(dev, 0x31, 0, 0x04);
507 + reg_w_riv(dev, 0x31, 0x04, 0);
508 spca504B_WaitCmdStatus(gspca_dev);
509 rc = spca504B_PollingDataReady(gspca_dev);
510 break;
511 @@ -807,14 +807,14 @@ static void init_ctl_reg(struct gspca_dev *gspca_dev)
512 default:
513 /* case BRIDGE_SPCA533: */
514 /* case BRIDGE_SPCA504B: */
515 - reg_w_riv(dev, 0, 0x00, 0x21ad); /* hue */
516 - reg_w_riv(dev, 0, 0x01, 0x21ac); /* sat/hue */
517 - reg_w_riv(dev, 0, 0x00, 0x21a3); /* gamma */
518 + reg_w_riv(dev, 0, 0x21ad, 0x00); /* hue */
519 + reg_w_riv(dev, 0, 0x21ac, 0x01); /* sat/hue */
520 + reg_w_riv(dev, 0, 0x21a3, 0x00); /* gamma */
521 break;
522 case BRIDGE_SPCA536:
523 - reg_w_riv(dev, 0, 0x40, 0x20f5);
524 - reg_w_riv(dev, 0, 0x01, 0x20f4);
525 - reg_w_riv(dev, 0, 0x00, 0x2089);
526 + reg_w_riv(dev, 0, 0x20f5, 0x40);
527 + reg_w_riv(dev, 0, 0x20f4, 0x01);
528 + reg_w_riv(dev, 0, 0x2089, 0x00);
529 break;
530 }
531 if (pollreg)
532 @@ -888,11 +888,11 @@ static int sd_init(struct gspca_dev *gspca_dev)
533 switch (sd->bridge) {
534 case BRIDGE_SPCA504B:
535 reg_w_riv(dev, 0x1d, 0x00, 0);
536 - reg_w_riv(dev, 0, 0x01, 0x2306);
537 - reg_w_riv(dev, 0, 0x00, 0x0d04);
538 - reg_w_riv(dev, 0, 0x00, 0x2000);
539 - reg_w_riv(dev, 0, 0x13, 0x2301);
540 - reg_w_riv(dev, 0, 0x00, 0x2306);
541 + reg_w_riv(dev, 0, 0x2306, 0x01);
542 + reg_w_riv(dev, 0, 0x0d04, 0x00);
543 + reg_w_riv(dev, 0, 0x2000, 0x00);
544 + reg_w_riv(dev, 0, 0x2301, 0x13);
545 + reg_w_riv(dev, 0, 0x2306, 0x00);
546 /* fall thru */
547 case BRIDGE_SPCA533:
548 spca504B_PollingDataReady(gspca_dev);
549 @@ -1011,7 +1011,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
550 spca504B_WaitCmdStatus(gspca_dev);
551 break;
552 default:
553 - reg_w_riv(dev, 0x31, 0, 0x04);
554 + reg_w_riv(dev, 0x31, 0x04, 0);
555 spca504B_WaitCmdStatus(gspca_dev);
556 spca504B_PollingDataReady(gspca_dev);
557 break;
558 diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
559 index e9eae4a..1eac626 100644
560 --- a/drivers/misc/enclosure.c
561 +++ b/drivers/misc/enclosure.c
562 @@ -391,6 +391,7 @@ static const char *const enclosure_status [] = {
563 [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
564 [ENCLOSURE_STATUS_UNKNOWN] = "unknown",
565 [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
566 + [ENCLOSURE_STATUS_MAX] = NULL,
567 };
568
569 static const char *const enclosure_type [] = {
570 diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
571 index 0cce8a4..deac67e 100644
572 --- a/drivers/serial/8250_pnp.c
573 +++ b/drivers/serial/8250_pnp.c
574 @@ -328,15 +328,7 @@ static const struct pnp_device_id pnp_dev_table[] = {
575 /* U.S. Robotics 56K Voice INT PnP*/
576 { "USR9190", 0 },
577 /* Wacom tablets */
578 - { "WACF004", 0 },
579 - { "WACF005", 0 },
580 - { "WACF006", 0 },
581 - { "WACF007", 0 },
582 - { "WACF008", 0 },
583 - { "WACF009", 0 },
584 - { "WACF00A", 0 },
585 - { "WACF00B", 0 },
586 - { "WACF00C", 0 },
587 + { "WACFXXX", 0 },
588 /* Compaq touchscreen */
589 { "FPI2002", 0 },
590 /* Fujitsu Stylistic touchscreens */
591 diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
592 index f4c2657..43c57b7 100644
593 --- a/drivers/staging/asus_oled/asus_oled.c
594 +++ b/drivers/staging/asus_oled/asus_oled.c
595 @@ -194,9 +194,11 @@ static ssize_t set_enabled(struct device *dev, struct device_attribute *attr,
596 {
597 struct usb_interface *intf = to_usb_interface(dev);
598 struct asus_oled_dev *odev = usb_get_intfdata(intf);
599 - int temp = strict_strtoul(buf, 10, NULL);
600 + unsigned long value;
601 + if (strict_strtoul(buf, 10, &value))
602 + return -EINVAL;
603
604 - enable_oled(odev, temp);
605 + enable_oled(odev, value);
606
607 return count;
608 }
609 @@ -207,10 +209,12 @@ static ssize_t class_set_enabled(struct device *device,
610 {
611 struct asus_oled_dev *odev =
612 (struct asus_oled_dev *) dev_get_drvdata(device);
613 + unsigned long value;
614
615 - int temp = strict_strtoul(buf, 10, NULL);
616 + if (strict_strtoul(buf, 10, &value))
617 + return -EINVAL;
618
619 - enable_oled(odev, temp);
620 + enable_oled(odev, value);
621
622 return count;
623 }
624 diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
625 index c5b6613..c2809f2 100644
626 --- a/drivers/staging/hv/Hv.c
627 +++ b/drivers/staging/hv/Hv.c
628 @@ -386,7 +386,7 @@ u16 HvSignalEvent(void)
629 * retrieve the initialized message and event pages. Otherwise, we create and
630 * initialize the message and event pages.
631 */
632 -int HvSynicInit(u32 irqVector)
633 +void HvSynicInit(void *irqarg)
634 {
635 u64 version;
636 union hv_synic_simp simp;
637 @@ -394,13 +394,14 @@ int HvSynicInit(u32 irqVector)
638 union hv_synic_sint sharedSint;
639 union hv_synic_scontrol sctrl;
640 u64 guestID;
641 - int ret = 0;
642 + u32 irqVector = *((u32 *)(irqarg));
643 + int cpu = smp_processor_id();
644
645 DPRINT_ENTER(VMBUS);
646
647 if (!gHvContext.HypercallPage) {
648 DPRINT_EXIT(VMBUS);
649 - return ret;
650 + return;
651 }
652
653 /* Check the version */
654 @@ -425,27 +426,27 @@ int HvSynicInit(u32 irqVector)
655 */
656 rdmsrl(HV_X64_MSR_GUEST_OS_ID, guestID);
657 if (guestID == HV_LINUX_GUEST_ID) {
658 - gHvContext.synICMessagePage[0] =
659 + gHvContext.synICMessagePage[cpu] =
660 phys_to_virt(simp.BaseSimpGpa << PAGE_SHIFT);
661 - gHvContext.synICEventPage[0] =
662 + gHvContext.synICEventPage[cpu] =
663 phys_to_virt(siefp.BaseSiefpGpa << PAGE_SHIFT);
664 } else {
665 DPRINT_ERR(VMBUS, "unknown guest id!!");
666 goto Cleanup;
667 }
668 DPRINT_DBG(VMBUS, "MAPPED: Simp: %p, Sifep: %p",
669 - gHvContext.synICMessagePage[0],
670 - gHvContext.synICEventPage[0]);
671 + gHvContext.synICMessagePage[cpu],
672 + gHvContext.synICEventPage[cpu]);
673 } else {
674 - gHvContext.synICMessagePage[0] = osd_PageAlloc(1);
675 - if (gHvContext.synICMessagePage[0] == NULL) {
676 + gHvContext.synICMessagePage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
677 + if (gHvContext.synICMessagePage[cpu] == NULL) {
678 DPRINT_ERR(VMBUS,
679 "unable to allocate SYNIC message page!!");
680 goto Cleanup;
681 }
682
683 - gHvContext.synICEventPage[0] = osd_PageAlloc(1);
684 - if (gHvContext.synICEventPage[0] == NULL) {
685 + gHvContext.synICEventPage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
686 + if (gHvContext.synICEventPage[cpu] == NULL) {
687 DPRINT_ERR(VMBUS,
688 "unable to allocate SYNIC event page!!");
689 goto Cleanup;
690 @@ -454,7 +455,7 @@ int HvSynicInit(u32 irqVector)
691 /* Setup the Synic's message page */
692 rdmsrl(HV_X64_MSR_SIMP, simp.AsUINT64);
693 simp.SimpEnabled = 1;
694 - simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[0])
695 + simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[cpu])
696 >> PAGE_SHIFT;
697
698 DPRINT_DBG(VMBUS, "HV_X64_MSR_SIMP msr set to: %llx",
699 @@ -465,7 +466,7 @@ int HvSynicInit(u32 irqVector)
700 /* Setup the Synic's event page */
701 rdmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
702 siefp.SiefpEnabled = 1;
703 - siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[0])
704 + siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[cpu])
705 >> PAGE_SHIFT;
706
707 DPRINT_DBG(VMBUS, "HV_X64_MSR_SIEFP msr set to: %llx",
708 @@ -501,32 +502,30 @@ int HvSynicInit(u32 irqVector)
709
710 DPRINT_EXIT(VMBUS);
711
712 - return ret;
713 + return;
714
715 Cleanup:
716 - ret = -1;
717 -
718 if (gHvContext.GuestId == HV_LINUX_GUEST_ID) {
719 - if (gHvContext.synICEventPage[0])
720 - osd_PageFree(gHvContext.synICEventPage[0], 1);
721 + if (gHvContext.synICEventPage[cpu])
722 + osd_PageFree(gHvContext.synICEventPage[cpu], 1);
723
724 - if (gHvContext.synICMessagePage[0])
725 - osd_PageFree(gHvContext.synICMessagePage[0], 1);
726 + if (gHvContext.synICMessagePage[cpu])
727 + osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
728 }
729
730 DPRINT_EXIT(VMBUS);
731 -
732 - return ret;
733 + return;
734 }
735
736 /**
737 * HvSynicCleanup - Cleanup routine for HvSynicInit().
738 */
739 -void HvSynicCleanup(void)
740 +void HvSynicCleanup(void *arg)
741 {
742 union hv_synic_sint sharedSint;
743 union hv_synic_simp simp;
744 union hv_synic_siefp siefp;
745 + int cpu = smp_processor_id();
746
747 DPRINT_ENTER(VMBUS);
748
749 @@ -539,6 +538,7 @@ void HvSynicCleanup(void)
750
751 sharedSint.Masked = 1;
752
753 + /* Need to correctly cleanup in the case of SMP!!! */
754 /* Disable the interrupt */
755 wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64);
756
757 @@ -560,8 +560,8 @@ void HvSynicCleanup(void)
758
759 wrmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
760
761 - osd_PageFree(gHvContext.synICMessagePage[0], 1);
762 - osd_PageFree(gHvContext.synICEventPage[0], 1);
763 + osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
764 + osd_PageFree(gHvContext.synICEventPage[cpu], 1);
765 }
766
767 DPRINT_EXIT(VMBUS);
768 diff --git a/drivers/staging/hv/Hv.h b/drivers/staging/hv/Hv.h
769 index 5379e4b..fce4b5c 100644
770 --- a/drivers/staging/hv/Hv.h
771 +++ b/drivers/staging/hv/Hv.h
772 @@ -93,7 +93,7 @@ static const struct hv_guid VMBUS_SERVICE_ID = {
773 },
774 };
775
776 -#define MAX_NUM_CPUS 1
777 +#define MAX_NUM_CPUS 32
778
779
780 struct hv_input_signal_event_buffer {
781 @@ -137,8 +137,8 @@ extern u16 HvPostMessage(union hv_connection_id connectionId,
782
783 extern u16 HvSignalEvent(void);
784
785 -extern int HvSynicInit(u32 irqVector);
786 +extern void HvSynicInit(void *irqarg);
787
788 -extern void HvSynicCleanup(void);
789 +extern void HvSynicCleanup(void *arg);
790
791 #endif /* __HV_H__ */
792 diff --git a/drivers/staging/hv/Vmbus.c b/drivers/staging/hv/Vmbus.c
793 index a4dd06f..35a023e 100644
794 --- a/drivers/staging/hv/Vmbus.c
795 +++ b/drivers/staging/hv/Vmbus.c
796 @@ -129,7 +129,7 @@ static int VmbusOnDeviceAdd(struct hv_device *dev, void *AdditionalInfo)
797
798 /* strcpy(dev->name, "vmbus"); */
799 /* SynIC setup... */
800 - ret = HvSynicInit(*irqvector);
801 + on_each_cpu(HvSynicInit, (void *)irqvector, 1);
802
803 /* Connect to VMBus in the root partition */
804 ret = VmbusConnect();
805 @@ -150,7 +150,7 @@ static int VmbusOnDeviceRemove(struct hv_device *dev)
806 DPRINT_ENTER(VMBUS);
807 VmbusChannelReleaseUnattachedChannels();
808 VmbusDisconnect();
809 - HvSynicCleanup();
810 + on_each_cpu(HvSynicCleanup, NULL, 1);
811 DPRINT_EXIT(VMBUS);
812
813 return ret;
814 @@ -173,7 +173,8 @@ static void VmbusOnCleanup(struct hv_driver *drv)
815 */
816 static void VmbusOnMsgDPC(struct hv_driver *drv)
817 {
818 - void *page_addr = gHvContext.synICMessagePage[0];
819 + int cpu = smp_processor_id();
820 + void *page_addr = gHvContext.synICMessagePage[cpu];
821 struct hv_message *msg = (struct hv_message *)page_addr +
822 VMBUS_MESSAGE_SINT;
823 struct hv_message *copied;
824 @@ -230,11 +231,12 @@ static void VmbusOnEventDPC(struct hv_driver *drv)
825 static int VmbusOnISR(struct hv_driver *drv)
826 {
827 int ret = 0;
828 + int cpu = smp_processor_id();
829 void *page_addr;
830 struct hv_message *msg;
831 union hv_synic_event_flags *event;
832
833 - page_addr = gHvContext.synICMessagePage[0];
834 + page_addr = gHvContext.synICMessagePage[cpu];
835 msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
836
837 DPRINT_ENTER(VMBUS);
838 @@ -248,7 +250,7 @@ static int VmbusOnISR(struct hv_driver *drv)
839 }
840
841 /* TODO: Check if there are events to be process */
842 - page_addr = gHvContext.synICEventPage[0];
843 + page_addr = gHvContext.synICEventPage[cpu];
844 event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
845
846 /* Since we are a child, we only need to check bit 0 */
847 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
848 index 96f1171..355dffc 100644
849 --- a/drivers/usb/core/devices.c
850 +++ b/drivers/usb/core/devices.c
851 @@ -494,7 +494,7 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
852 return 0;
853 /* allocate 2^1 pages = 8K (on i386);
854 * should be more than enough for one device */
855 - pages_start = (char *)__get_free_pages(GFP_KERNEL, 1);
856 + pages_start = (char *)__get_free_pages(GFP_NOIO, 1);
857 if (!pages_start)
858 return -ENOMEM;
859
860 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
861 index 8b0c235..1a7d54b 100644
862 --- a/drivers/usb/core/hub.c
863 +++ b/drivers/usb/core/hub.c
864 @@ -3286,6 +3286,9 @@ static void hub_events(void)
865 USB_PORT_FEAT_C_SUSPEND);
866 udev = hdev->children[i-1];
867 if (udev) {
868 + /* TRSMRCY = 10 msec */
869 + msleep(10);
870 +
871 usb_lock_device(udev);
872 ret = remote_wakeup(hdev->
873 children[i-1]);
874 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
875 index da718e8..980a8d2 100644
876 --- a/drivers/usb/core/message.c
877 +++ b/drivers/usb/core/message.c
878 @@ -911,11 +911,11 @@ char *usb_cache_string(struct usb_device *udev, int index)
879 if (index <= 0)
880 return NULL;
881
882 - buf = kmalloc(MAX_USB_STRING_SIZE, GFP_KERNEL);
883 + buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
884 if (buf) {
885 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
886 if (len > 0) {
887 - smallbuf = kmalloc(++len, GFP_KERNEL);
888 + smallbuf = kmalloc(++len, GFP_NOIO);
889 if (!smallbuf)
890 return buf;
891 memcpy(smallbuf, buf, len);
892 @@ -1682,7 +1682,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
893 if (cp) {
894 nintf = cp->desc.bNumInterfaces;
895 new_interfaces = kmalloc(nintf * sizeof(*new_interfaces),
896 - GFP_KERNEL);
897 + GFP_NOIO);
898 if (!new_interfaces) {
899 dev_err(&dev->dev, "Out of memory\n");
900 return -ENOMEM;
901 @@ -1691,7 +1691,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
902 for (; n < nintf; ++n) {
903 new_interfaces[n] = kzalloc(
904 sizeof(struct usb_interface),
905 - GFP_KERNEL);
906 + GFP_NOIO);
907 if (!new_interfaces[n]) {
908 dev_err(&dev->dev, "Out of memory\n");
909 ret = -ENOMEM;
910 diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
911 index 8752e55..fcdcad4 100644
912 --- a/drivers/usb/core/sysfs.c
913 +++ b/drivers/usb/core/sysfs.c
914 @@ -115,6 +115,12 @@ show_speed(struct device *dev, struct device_attribute *attr, char *buf)
915 case USB_SPEED_HIGH:
916 speed = "480";
917 break;
918 + case USB_SPEED_VARIABLE:
919 + speed = "480";
920 + break;
921 + case USB_SPEED_SUPER:
922 + speed = "5000";
923 + break;
924 default:
925 speed = "unknown";
926 }
927 diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
928 index f5f5601..e18c677 100644
929 --- a/drivers/usb/host/ehci-hcd.c
930 +++ b/drivers/usb/host/ehci-hcd.c
931 @@ -785,9 +785,10 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
932
933 /* start 20 msec resume signaling from this port,
934 * and make khubd collect PORT_STAT_C_SUSPEND to
935 - * stop that signaling.
936 + * stop that signaling. Use 5 ms extra for safety,
937 + * like usb_port_resume() does.
938 */
939 - ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
940 + ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
941 ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
942 mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
943 }
944 diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
945 index 1b6f1c0..698f461 100644
946 --- a/drivers/usb/host/ehci-hub.c
947 +++ b/drivers/usb/host/ehci-hub.c
948 @@ -120,9 +120,26 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
949 del_timer_sync(&ehci->watchdog);
950 del_timer_sync(&ehci->iaa_watchdog);
951
952 - port = HCS_N_PORTS (ehci->hcs_params);
953 spin_lock_irq (&ehci->lock);
954
955 + /* Once the controller is stopped, port resumes that are already
956 + * in progress won't complete. Hence if remote wakeup is enabled
957 + * for the root hub and any ports are in the middle of a resume or
958 + * remote wakeup, we must fail the suspend.
959 + */
960 + if (hcd->self.root_hub->do_remote_wakeup) {
961 + port = HCS_N_PORTS(ehci->hcs_params);
962 + while (port--) {
963 + if (ehci->reset_done[port] != 0) {
964 + spin_unlock_irq(&ehci->lock);
965 + ehci_dbg(ehci, "suspend failed because "
966 + "port %d is resuming\n",
967 + port + 1);
968 + return -EBUSY;
969 + }
970 + }
971 + }
972 +
973 /* stop schedules, clean any completed work */
974 if (HC_IS_RUNNING(hcd->state)) {
975 ehci_quiesce (ehci);
976 @@ -138,6 +155,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
977 */
978 ehci->bus_suspended = 0;
979 ehci->owned_ports = 0;
980 + port = HCS_N_PORTS(ehci->hcs_params);
981 while (port--) {
982 u32 __iomem *reg = &ehci->regs->port_status [port];
983 u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
984 diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
985 index 139a2cc..c0d4b39 100644
986 --- a/drivers/usb/host/ehci-q.c
987 +++ b/drivers/usb/host/ehci-q.c
988 @@ -827,9 +827,10 @@ qh_make (
989 * But interval 1 scheduling is simpler, and
990 * includes high bandwidth.
991 */
992 - dbg ("intr period %d uframes, NYET!",
993 - urb->interval);
994 - goto done;
995 + urb->interval = 1;
996 + } else if (qh->period > ehci->periodic_size) {
997 + qh->period = ehci->periodic_size;
998 + urb->interval = qh->period << 3;
999 }
1000 } else {
1001 int think_time;
1002 @@ -852,6 +853,10 @@ qh_make (
1003 usb_calc_bus_time (urb->dev->speed,
1004 is_input, 0, max_packet (maxp)));
1005 qh->period = urb->interval;
1006 + if (qh->period > ehci->periodic_size) {
1007 + qh->period = ehci->periodic_size;
1008 + urb->interval = qh->period;
1009 + }
1010 }
1011 }
1012
1013 diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
1014 index 5cd0e48..99cd00f 100644
1015 --- a/drivers/usb/host/uhci-hcd.c
1016 +++ b/drivers/usb/host/uhci-hcd.c
1017 @@ -749,7 +749,20 @@ static int uhci_rh_suspend(struct usb_hcd *hcd)
1018 spin_lock_irq(&uhci->lock);
1019 if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
1020 rc = -ESHUTDOWN;
1021 - else if (!uhci->dead)
1022 + else if (uhci->dead)
1023 + ; /* Dead controllers tell no tales */
1024 +
1025 + /* Once the controller is stopped, port resumes that are already
1026 + * in progress won't complete. Hence if remote wakeup is enabled
1027 + * for the root hub and any ports are in the middle of a resume or
1028 + * remote wakeup, we must fail the suspend.
1029 + */
1030 + else if (hcd->self.root_hub->do_remote_wakeup &&
1031 + uhci->resuming_ports) {
1032 + dev_dbg(uhci_dev(uhci), "suspend failed because a port "
1033 + "is resuming\n");
1034 + rc = -EBUSY;
1035 + } else
1036 suspend_rh(uhci, UHCI_RH_SUSPENDED);
1037 spin_unlock_irq(&uhci->lock);
1038 return rc;
1039 diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
1040 index 885b585..8270055 100644
1041 --- a/drivers/usb/host/uhci-hub.c
1042 +++ b/drivers/usb/host/uhci-hub.c
1043 @@ -167,7 +167,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
1044 /* Port received a wakeup request */
1045 set_bit(port, &uhci->resuming_ports);
1046 uhci->ports_timeout = jiffies +
1047 - msecs_to_jiffies(20);
1048 + msecs_to_jiffies(25);
1049
1050 /* Make sure we see the port again
1051 * after the resuming period is over. */
1052 diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
1053 index bbe005c..e0fb294 100644
1054 --- a/drivers/usb/serial/generic.c
1055 +++ b/drivers/usb/serial/generic.c
1056 @@ -489,6 +489,8 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
1057 dbg("%s - port %d", __func__, port->number);
1058
1059 if (port->serial->type->max_in_flight_urbs) {
1060 + kfree(urb->transfer_buffer);
1061 +
1062 spin_lock_irqsave(&port->lock, flags);
1063 --port->urbs_in_flight;
1064 port->tx_bytes_flight -= urb->transfer_buffer_length;
1065 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1066 index 64a0a2c..c932f90 100644
1067 --- a/drivers/usb/storage/unusual_devs.h
1068 +++ b/drivers/usb/storage/unusual_devs.h
1069 @@ -1807,13 +1807,6 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999,
1070 US_SC_DEVICE, US_PR_DEVICE, NULL,
1071 US_FL_GO_SLOW ),
1072
1073 -/* Reported by Rohan Hart <rohan.hart17@gmail.com> */
1074 -UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
1075 - "INTOVA",
1076 - "Pixtreme",
1077 - US_SC_DEVICE, US_PR_DEVICE, NULL,
1078 - US_FL_FIX_CAPACITY ),
1079 -
1080 /* Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
1081 * Mio Moov 330
1082 */
1083 diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
1084 index 716c8d7..33197fa 100644
1085 --- a/drivers/usb/storage/usb.c
1086 +++ b/drivers/usb/storage/usb.c
1087 @@ -430,7 +430,8 @@ static void adjust_quirks(struct us_data *us)
1088 u16 vid = le16_to_cpu(us->pusb_dev->descriptor.idVendor);
1089 u16 pid = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
1090 unsigned f = 0;
1091 - unsigned int mask = (US_FL_SANE_SENSE | US_FL_FIX_CAPACITY |
1092 + unsigned int mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE |
1093 + US_FL_FIX_CAPACITY |
1094 US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE |
1095 US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
1096 US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
1097 diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
1098 index fbb6e5e..7cb0a59 100644
1099 --- a/fs/ecryptfs/crypto.c
1100 +++ b/fs/ecryptfs/crypto.c
1101 @@ -1748,7 +1748,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
1102 char *cipher_name, size_t *key_size)
1103 {
1104 char dummy_key[ECRYPTFS_MAX_KEY_BYTES];
1105 - char *full_alg_name;
1106 + char *full_alg_name = NULL;
1107 int rc;
1108
1109 *key_tfm = NULL;
1110 @@ -1763,7 +1763,6 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
1111 if (rc)
1112 goto out;
1113 *key_tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC);
1114 - kfree(full_alg_name);
1115 if (IS_ERR(*key_tfm)) {
1116 rc = PTR_ERR(*key_tfm);
1117 printk(KERN_ERR "Unable to allocate crypto cipher with name "
1118 @@ -1786,6 +1785,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
1119 goto out;
1120 }
1121 out:
1122 + kfree(full_alg_name);
1123 return rc;
1124 }
1125
1126 diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
1127 index 9e94405..1744f17 100644
1128 --- a/fs/ecryptfs/file.c
1129 +++ b/fs/ecryptfs/file.c
1130 @@ -191,13 +191,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
1131 | ECRYPTFS_ENCRYPTED);
1132 }
1133 mutex_unlock(&crypt_stat->cs_mutex);
1134 - if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
1135 - && !(file->f_flags & O_RDONLY)) {
1136 - rc = -EPERM;
1137 - printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
1138 - "file must hence be opened RO\n", __func__);
1139 - goto out;
1140 - }
1141 if (!ecryptfs_inode_to_private(inode)->lower_file) {
1142 rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
1143 if (rc) {
1144 @@ -208,6 +201,13 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
1145 goto out;
1146 }
1147 }
1148 + if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
1149 + && !(file->f_flags & O_RDONLY)) {
1150 + rc = -EPERM;
1151 + printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
1152 + "file must hence be opened RO\n", __func__);
1153 + goto out;
1154 + }
1155 ecryptfs_set_file_lower(
1156 file, ecryptfs_inode_to_private(inode)->lower_file);
1157 if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
1158 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
1159 index adf99c6..912b8ff 100644
1160 --- a/include/linux/blkdev.h
1161 +++ b/include/linux/blkdev.h
1162 @@ -942,6 +942,8 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1163 extern void blk_set_default_limits(struct queue_limits *lim);
1164 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1165 sector_t offset);
1166 +extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
1167 + sector_t offset);
1168 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1169 sector_t offset);
1170 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
1171 diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
1172 index 90d1c21..9a33c5f 100644
1173 --- a/include/linux/enclosure.h
1174 +++ b/include/linux/enclosure.h
1175 @@ -42,6 +42,8 @@ enum enclosure_status {
1176 ENCLOSURE_STATUS_NOT_INSTALLED,
1177 ENCLOSURE_STATUS_UNKNOWN,
1178 ENCLOSURE_STATUS_UNAVAILABLE,
1179 + /* last element for counting purposes */
1180 + ENCLOSURE_STATUS_MAX
1181 };
1182
1183 /* SFF-8485 activity light settings */
1184 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
1185 index 6eee915..413d101 100644
1186 --- a/kernel/perf_event.c
1187 +++ b/kernel/perf_event.c
1188 @@ -1359,6 +1359,9 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1189 if (event->state != PERF_EVENT_STATE_ACTIVE)
1190 continue;
1191
1192 + if (event->cpu != -1 && event->cpu != smp_processor_id())
1193 + continue;
1194 +
1195 hwc = &event->hw;
1196
1197 interrupts = hwc->interrupts;
1198 @@ -3226,6 +3229,12 @@ static void perf_event_task_output(struct perf_event *event,
1199
1200 static int perf_event_task_match(struct perf_event *event)
1201 {
1202 + if (event->state != PERF_EVENT_STATE_ACTIVE)
1203 + return 0;
1204 +
1205 + if (event->cpu != -1 && event->cpu != smp_processor_id())
1206 + return 0;
1207 +
1208 if (event->attr.comm || event->attr.mmap || event->attr.task)
1209 return 1;
1210
1211 @@ -3255,13 +3264,13 @@ static void perf_event_task_event(struct perf_task_event *task_event)
1212
1213 cpuctx = &get_cpu_var(perf_cpu_context);
1214 perf_event_task_ctx(&cpuctx->ctx, task_event);
1215 - put_cpu_var(perf_cpu_context);
1216
1217 rcu_read_lock();
1218 if (!ctx)
1219 ctx = rcu_dereference(task_event->task->perf_event_ctxp);
1220 if (ctx)
1221 perf_event_task_ctx(ctx, task_event);
1222 + put_cpu_var(perf_cpu_context);
1223 rcu_read_unlock();
1224 }
1225
1226 @@ -3338,6 +3347,12 @@ static void perf_event_comm_output(struct perf_event *event,
1227
1228 static int perf_event_comm_match(struct perf_event *event)
1229 {
1230 + if (event->state != PERF_EVENT_STATE_ACTIVE)
1231 + return 0;
1232 +
1233 + if (event->cpu != -1 && event->cpu != smp_processor_id())
1234 + return 0;
1235 +
1236 if (event->attr.comm)
1237 return 1;
1238
1239 @@ -3378,7 +3393,6 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
1240
1241 cpuctx = &get_cpu_var(perf_cpu_context);
1242 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
1243 - put_cpu_var(perf_cpu_context);
1244
1245 rcu_read_lock();
1246 /*
1247 @@ -3388,6 +3402,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
1248 ctx = rcu_dereference(current->perf_event_ctxp);
1249 if (ctx)
1250 perf_event_comm_ctx(ctx, comm_event);
1251 + put_cpu_var(perf_cpu_context);
1252 rcu_read_unlock();
1253 }
1254
1255 @@ -3462,6 +3477,12 @@ static void perf_event_mmap_output(struct perf_event *event,
1256 static int perf_event_mmap_match(struct perf_event *event,
1257 struct perf_mmap_event *mmap_event)
1258 {
1259 + if (event->state != PERF_EVENT_STATE_ACTIVE)
1260 + return 0;
1261 +
1262 + if (event->cpu != -1 && event->cpu != smp_processor_id())
1263 + return 0;
1264 +
1265 if (event->attr.mmap)
1266 return 1;
1267
1268 @@ -3539,7 +3560,6 @@ got_name:
1269
1270 cpuctx = &get_cpu_var(perf_cpu_context);
1271 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
1272 - put_cpu_var(perf_cpu_context);
1273
1274 rcu_read_lock();
1275 /*
1276 @@ -3549,6 +3569,7 @@ got_name:
1277 ctx = rcu_dereference(current->perf_event_ctxp);
1278 if (ctx)
1279 perf_event_mmap_ctx(ctx, mmap_event);
1280 + put_cpu_var(perf_cpu_context);
1281 rcu_read_unlock();
1282
1283 kfree(buf);
1284 @@ -3811,6 +3832,9 @@ static int perf_swevent_match(struct perf_event *event,
1285 enum perf_type_id type,
1286 u32 event_id, struct pt_regs *regs)
1287 {
1288 + if (event->cpu != -1 && event->cpu != smp_processor_id())
1289 + return 0;
1290 +
1291 if (!perf_swevent_is_counting(event))
1292 return 0;
1293
1294 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
1295 index 7758726..a3a99d3 100644
1296 --- a/mm/vmalloc.c
1297 +++ b/mm/vmalloc.c
1298 @@ -555,10 +555,8 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
1299 }
1300 rcu_read_unlock();
1301
1302 - if (nr) {
1303 - BUG_ON(nr > atomic_read(&vmap_lazy_nr));
1304 + if (nr)
1305 atomic_sub(nr, &vmap_lazy_nr);
1306 - }
1307
1308 if (nr || force_flush)
1309 flush_tlb_kernel_range(*start, *end);
1310 diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
1311 index e8a510d..4101afe 100644
1312 --- a/tools/perf/builtin-timechart.c
1313 +++ b/tools/perf/builtin-timechart.c
1314 @@ -275,7 +275,7 @@ static u64 cpus_pstate_state[MAX_CPUS];
1315 static int
1316 process_comm_event(event_t *event)
1317 {
1318 - pid_set_comm(event->comm.pid, event->comm.comm);
1319 + pid_set_comm(event->comm.tid, event->comm.comm);
1320 return 0;
1321 }
1322 static int

  ViewVC Help
Powered by ViewVC 1.1.20