/[linux-patches]/genpatches-2.6/tags/3.0-30/1037_linux-3.0.38.patch
Gentoo

Contents of /genpatches-2.6/tags/3.0-30/1037_linux-3.0.38.patch

Parent Directory | Revision Log


Revision 2206 - (show annotations) (download)
Mon Sep 17 18:58:14 2012 UTC (2 years, 3 months ago) by mpagano
File size: 26410 byte(s)
3.0-30 release
1 diff --git a/Makefile b/Makefile
2 index 009160e..5fdfaa8 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 0
8 -SUBLEVEL = 37
9 +SUBLEVEL = 38
10 EXTRAVERSION =
11 NAME = Sneaky Weasel
12
13 diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
14 index e8f2be2..df14954 100644
15 --- a/arch/arm/plat-samsung/adc.c
16 +++ b/arch/arm/plat-samsung/adc.c
17 @@ -143,11 +143,13 @@ int s3c_adc_start(struct s3c_adc_client *client,
18 return -EINVAL;
19 }
20
21 - if (client->is_ts && adc->ts_pend)
22 - return -EAGAIN;
23 -
24 spin_lock_irqsave(&adc->lock, flags);
25
26 + if (client->is_ts && adc->ts_pend) {
27 + spin_unlock_irqrestore(&adc->lock, flags);
28 + return -EAGAIN;
29 + }
30 +
31 client->channel = channel;
32 client->nr_samples = nr_samples;
33
34 diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
35 index 5f52477..b358c87 100644
36 --- a/drivers/hwmon/it87.c
37 +++ b/drivers/hwmon/it87.c
38 @@ -2057,7 +2057,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
39
40 /* Start monitoring */
41 it87_write_value(data, IT87_REG_CONFIG,
42 - (it87_read_value(data, IT87_REG_CONFIG) & 0x36)
43 + (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
44 | (update_vbat ? 0x41 : 0x01));
45 }
46
47 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
48 index 56abf3d..92c7be1 100644
49 --- a/drivers/input/joystick/xpad.c
50 +++ b/drivers/input/joystick/xpad.c
51 @@ -142,6 +142,7 @@ static const struct xpad_device {
52 { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
53 { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
54 { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
55 + { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
56 { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
57 { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
58 { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
59 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
60 index f732877..d5cda35 100644
61 --- a/drivers/media/dvb/dvb-core/dvbdev.c
62 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
63 @@ -243,6 +243,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
64 if (minor == MAX_DVB_MINORS) {
65 kfree(dvbdevfops);
66 kfree(dvbdev);
67 + up_write(&minor_rwsem);
68 mutex_unlock(&dvbdev_register_lock);
69 return -EINVAL;
70 }
71 diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
72 index 357e8c5..63c8048 100644
73 --- a/drivers/mtd/nand/nandsim.c
74 +++ b/drivers/mtd/nand/nandsim.c
75 @@ -28,7 +28,7 @@
76 #include <linux/module.h>
77 #include <linux/moduleparam.h>
78 #include <linux/vmalloc.h>
79 -#include <asm/div64.h>
80 +#include <linux/math64.h>
81 #include <linux/slab.h>
82 #include <linux/errno.h>
83 #include <linux/string.h>
84 @@ -547,12 +547,6 @@ static char *get_partition_name(int i)
85 return kstrdup(buf, GFP_KERNEL);
86 }
87
88 -static uint64_t divide(uint64_t n, uint32_t d)
89 -{
90 - do_div(n, d);
91 - return n;
92 -}
93 -
94 /*
95 * Initialize the nandsim structure.
96 *
97 @@ -581,7 +575,7 @@ static int init_nandsim(struct mtd_info *mtd)
98 ns->geom.oobsz = mtd->oobsize;
99 ns->geom.secsz = mtd->erasesize;
100 ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
101 - ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz);
102 + ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
103 ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
104 ns->geom.secshift = ffs(ns->geom.secsz) - 1;
105 ns->geom.pgshift = chip->page_shift;
106 @@ -924,7 +918,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
107
108 if (!rptwear)
109 return 0;
110 - wear_eb_count = divide(mtd->size, mtd->erasesize);
111 + wear_eb_count = div_u64(mtd->size, mtd->erasesize);
112 mem = wear_eb_count * sizeof(unsigned long);
113 if (mem / sizeof(unsigned long) != wear_eb_count) {
114 NS_ERR("Too many erase blocks for wear reporting\n");
115 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
116 index 8295f21..5278e84 100644
117 --- a/drivers/net/e1000e/82571.c
118 +++ b/drivers/net/e1000e/82571.c
119 @@ -1573,6 +1573,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
120 ctrl = er32(CTRL);
121 status = er32(STATUS);
122 rxcw = er32(RXCW);
123 + /* SYNCH bit and IV bit are sticky */
124 + udelay(10);
125 + rxcw = er32(RXCW);
126
127 if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
128
129 diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
130 index 54f0b13..99fa416 100644
131 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c
132 +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
133 @@ -426,8 +426,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
134 case QID_RX:
135 if (!rt2x00queue_full(queue))
136 rt2x00queue_for_each_entry(queue,
137 - Q_INDEX_DONE,
138 Q_INDEX,
139 + Q_INDEX_DONE,
140 NULL,
141 rt2x00usb_kick_rx_entry);
142 break;
143 diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
144 index 5ffe7c3..e66bbba 100644
145 --- a/drivers/platform/x86/intel_ips.c
146 +++ b/drivers/platform/x86/intel_ips.c
147 @@ -72,6 +72,7 @@
148 #include <linux/string.h>
149 #include <linux/tick.h>
150 #include <linux/timer.h>
151 +#include <linux/dmi.h>
152 #include <drm/i915_drm.h>
153 #include <asm/msr.h>
154 #include <asm/processor.h>
155 @@ -1505,6 +1506,24 @@ static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
156
157 MODULE_DEVICE_TABLE(pci, ips_id_table);
158
159 +static int ips_blacklist_callback(const struct dmi_system_id *id)
160 +{
161 + pr_info("Blacklisted intel_ips for %s\n", id->ident);
162 + return 1;
163 +}
164 +
165 +static const struct dmi_system_id ips_blacklist[] = {
166 + {
167 + .callback = ips_blacklist_callback,
168 + .ident = "HP ProBook",
169 + .matches = {
170 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
171 + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
172 + },
173 + },
174 + { } /* terminating entry */
175 +};
176 +
177 static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
178 {
179 u64 platform_info;
180 @@ -1514,6 +1533,9 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
181 u16 htshi, trc, trc_required_mask;
182 u8 tse;
183
184 + if (dmi_check_system(ips_blacklist))
185 + return -ENODEV;
186 +
187 ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
188 if (!ips)
189 return -ENOMEM;
190 diff --git a/fs/buffer.c b/fs/buffer.c
191 index 330cbce..d421626 100644
192 --- a/fs/buffer.c
193 +++ b/fs/buffer.c
194 @@ -1084,6 +1084,9 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
195 static struct buffer_head *
196 __getblk_slow(struct block_device *bdev, sector_t block, int size)
197 {
198 + int ret;
199 + struct buffer_head *bh;
200 +
201 /* Size must be multiple of hard sectorsize */
202 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
203 (size < 512 || size > PAGE_SIZE))) {
204 @@ -1096,20 +1099,21 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
205 return NULL;
206 }
207
208 - for (;;) {
209 - struct buffer_head * bh;
210 - int ret;
211 +retry:
212 + bh = __find_get_block(bdev, block, size);
213 + if (bh)
214 + return bh;
215
216 + ret = grow_buffers(bdev, block, size);
217 + if (ret == 0) {
218 + free_more_memory();
219 + goto retry;
220 + } else if (ret > 0) {
221 bh = __find_get_block(bdev, block, size);
222 if (bh)
223 return bh;
224 -
225 - ret = grow_buffers(bdev, block, size);
226 - if (ret < 0)
227 - return NULL;
228 - if (ret == 0)
229 - free_more_memory();
230 }
231 + return NULL;
232 }
233
234 /*
235 diff --git a/fs/fifo.c b/fs/fifo.c
236 index b1a524d..cf6f434 100644
237 --- a/fs/fifo.c
238 +++ b/fs/fifo.c
239 @@ -14,7 +14,7 @@
240 #include <linux/sched.h>
241 #include <linux/pipe_fs_i.h>
242
243 -static void wait_for_partner(struct inode* inode, unsigned int *cnt)
244 +static int wait_for_partner(struct inode* inode, unsigned int *cnt)
245 {
246 int cur = *cnt;
247
248 @@ -23,6 +23,7 @@ static void wait_for_partner(struct inode* inode, unsigned int *cnt)
249 if (signal_pending(current))
250 break;
251 }
252 + return cur == *cnt ? -ERESTARTSYS : 0;
253 }
254
255 static void wake_up_partner(struct inode* inode)
256 @@ -67,8 +68,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
257 * seen a writer */
258 filp->f_version = pipe->w_counter;
259 } else {
260 - wait_for_partner(inode, &pipe->w_counter);
261 - if(signal_pending(current))
262 + if (wait_for_partner(inode, &pipe->w_counter))
263 goto err_rd;
264 }
265 }
266 @@ -90,8 +90,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
267 wake_up_partner(inode);
268
269 if (!pipe->readers) {
270 - wait_for_partner(inode, &pipe->r_counter);
271 - if (signal_pending(current))
272 + if (wait_for_partner(inode, &pipe->r_counter))
273 goto err_wr;
274 }
275 break;
276 diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
277 index fd0dc30..cc07d27 100644
278 --- a/include/linux/hrtimer.h
279 +++ b/include/linux/hrtimer.h
280 @@ -165,6 +165,7 @@ enum hrtimer_base_type {
281 * @lock: lock protecting the base and associated clock bases
282 * and timers
283 * @active_bases: Bitfield to mark bases with active timers
284 + * @clock_was_set: Indicates that clock was set from irq context.
285 * @expires_next: absolute time of the next event which was scheduled
286 * via clock_set_next_event()
287 * @hres_active: State of high resolution mode
288 @@ -177,7 +178,8 @@ enum hrtimer_base_type {
289 */
290 struct hrtimer_cpu_base {
291 raw_spinlock_t lock;
292 - unsigned long active_bases;
293 + unsigned int active_bases;
294 + unsigned int clock_was_set;
295 #ifdef CONFIG_HIGH_RES_TIMERS
296 ktime_t expires_next;
297 int hres_active;
298 @@ -286,6 +288,8 @@ extern void hrtimer_peek_ahead_timers(void);
299 # define MONOTONIC_RES_NSEC HIGH_RES_NSEC
300 # define KTIME_MONOTONIC_RES KTIME_HIGH_RES
301
302 +extern void clock_was_set_delayed(void);
303 +
304 #else
305
306 # define MONOTONIC_RES_NSEC LOW_RES_NSEC
307 @@ -306,6 +310,9 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
308 {
309 return 0;
310 }
311 +
312 +static inline void clock_was_set_delayed(void) { }
313 +
314 #endif
315
316 extern void clock_was_set(void);
317 @@ -320,6 +327,7 @@ extern ktime_t ktime_get(void);
318 extern ktime_t ktime_get_real(void);
319 extern ktime_t ktime_get_boottime(void);
320 extern ktime_t ktime_get_monotonic_offset(void);
321 +extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot);
322
323 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
324
325 diff --git a/include/linux/timex.h b/include/linux/timex.h
326 index aa60fe7..08e90fb 100644
327 --- a/include/linux/timex.h
328 +++ b/include/linux/timex.h
329 @@ -266,7 +266,7 @@ static inline int ntp_synced(void)
330 /* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
331 extern u64 tick_length;
332
333 -extern void second_overflow(void);
334 +extern int second_overflow(unsigned long secs);
335 extern void update_ntp_one_tick(void);
336 extern int do_adjtimex(struct timex *);
337 extern void hardpps(const struct timespec *, const struct timespec *);
338 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
339 index 2043c08..957869f 100644
340 --- a/kernel/hrtimer.c
341 +++ b/kernel/hrtimer.c
342 @@ -657,6 +657,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
343 return 0;
344 }
345
346 +static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
347 +{
348 + ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
349 + ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
350 +
351 + return ktime_get_update_offsets(offs_real, offs_boot);
352 +}
353 +
354 /*
355 * Retrigger next event is called after clock was set
356 *
357 @@ -665,22 +673,12 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
358 static void retrigger_next_event(void *arg)
359 {
360 struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
361 - struct timespec realtime_offset, xtim, wtm, sleep;
362
363 if (!hrtimer_hres_active())
364 return;
365
366 - /* Optimized out for !HIGH_RES */
367 - get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
368 - set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
369 -
370 - /* Adjust CLOCK_REALTIME offset */
371 raw_spin_lock(&base->lock);
372 - base->clock_base[HRTIMER_BASE_REALTIME].offset =
373 - timespec_to_ktime(realtime_offset);
374 - base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
375 - timespec_to_ktime(sleep);
376 -
377 + hrtimer_update_base(base);
378 hrtimer_force_reprogram(base, 0);
379 raw_spin_unlock(&base->lock);
380 }
381 @@ -710,13 +708,25 @@ static int hrtimer_switch_to_hres(void)
382 base->clock_base[i].resolution = KTIME_HIGH_RES;
383
384 tick_setup_sched_timer();
385 -
386 /* "Retrigger" the interrupt to get things going */
387 retrigger_next_event(NULL);
388 local_irq_restore(flags);
389 return 1;
390 }
391
392 +/*
393 + * Called from timekeeping code to reprogramm the hrtimer interrupt
394 + * device. If called from the timer interrupt context we defer it to
395 + * softirq context.
396 + */
397 +void clock_was_set_delayed(void)
398 +{
399 + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
400 +
401 + cpu_base->clock_was_set = 1;
402 + __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
403 +}
404 +
405 #else
406
407 static inline int hrtimer_hres_active(void) { return 0; }
408 @@ -1250,11 +1260,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
409 cpu_base->nr_events++;
410 dev->next_event.tv64 = KTIME_MAX;
411
412 - entry_time = now = ktime_get();
413 + raw_spin_lock(&cpu_base->lock);
414 + entry_time = now = hrtimer_update_base(cpu_base);
415 retry:
416 expires_next.tv64 = KTIME_MAX;
417 -
418 - raw_spin_lock(&cpu_base->lock);
419 /*
420 * We set expires_next to KTIME_MAX here with cpu_base->lock
421 * held to prevent that a timer is enqueued in our queue via
422 @@ -1330,8 +1339,12 @@ retry:
423 * We need to prevent that we loop forever in the hrtimer
424 * interrupt routine. We give it 3 attempts to avoid
425 * overreacting on some spurious event.
426 + *
427 + * Acquire base lock for updating the offsets and retrieving
428 + * the current time.
429 */
430 - now = ktime_get();
431 + raw_spin_lock(&cpu_base->lock);
432 + now = hrtimer_update_base(cpu_base);
433 cpu_base->nr_retries++;
434 if (++retries < 3)
435 goto retry;
436 @@ -1343,6 +1356,7 @@ retry:
437 */
438 cpu_base->nr_hangs++;
439 cpu_base->hang_detected = 1;
440 + raw_spin_unlock(&cpu_base->lock);
441 delta = ktime_sub(now, entry_time);
442 if (delta.tv64 > cpu_base->max_hang_time.tv64)
443 cpu_base->max_hang_time = delta;
444 @@ -1395,6 +1409,13 @@ void hrtimer_peek_ahead_timers(void)
445
446 static void run_hrtimer_softirq(struct softirq_action *h)
447 {
448 + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
449 +
450 + if (cpu_base->clock_was_set) {
451 + cpu_base->clock_was_set = 0;
452 + clock_was_set();
453 + }
454 +
455 hrtimer_peek_ahead_timers();
456 }
457
458 diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
459 index 4b85a7a..f1eb182 100644
460 --- a/kernel/time/ntp.c
461 +++ b/kernel/time/ntp.c
462 @@ -31,8 +31,6 @@ unsigned long tick_nsec;
463 u64 tick_length;
464 static u64 tick_length_base;
465
466 -static struct hrtimer leap_timer;
467 -
468 #define MAX_TICKADJ 500LL /* usecs */
469 #define MAX_TICKADJ_SCALED \
470 (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
471 @@ -350,60 +348,60 @@ void ntp_clear(void)
472 }
473
474 /*
475 - * Leap second processing. If in leap-insert state at the end of the
476 - * day, the system clock is set back one second; if in leap-delete
477 - * state, the system clock is set ahead one second.
478 + * this routine handles the overflow of the microsecond field
479 + *
480 + * The tricky bits of code to handle the accurate clock support
481 + * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
482 + * They were originally developed for SUN and DEC kernels.
483 + * All the kudos should go to Dave for this stuff.
484 + *
485 + * Also handles leap second processing, and returns leap offset
486 */
487 -static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
488 +int second_overflow(unsigned long secs)
489 {
490 - enum hrtimer_restart res = HRTIMER_NORESTART;
491 -
492 - write_seqlock(&xtime_lock);
493 + int leap = 0;
494 + s64 delta;
495
496 + /*
497 + * Leap second processing. If in leap-insert state at the end of the
498 + * day, the system clock is set back one second; if in leap-delete
499 + * state, the system clock is set ahead one second.
500 + */
501 switch (time_state) {
502 case TIME_OK:
503 + if (time_status & STA_INS)
504 + time_state = TIME_INS;
505 + else if (time_status & STA_DEL)
506 + time_state = TIME_DEL;
507 break;
508 case TIME_INS:
509 - timekeeping_leap_insert(-1);
510 - time_state = TIME_OOP;
511 - printk(KERN_NOTICE
512 - "Clock: inserting leap second 23:59:60 UTC\n");
513 - hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
514 - res = HRTIMER_RESTART;
515 + if (secs % 86400 == 0) {
516 + leap = -1;
517 + time_state = TIME_OOP;
518 + time_tai++;
519 + printk(KERN_NOTICE
520 + "Clock: inserting leap second 23:59:60 UTC\n");
521 + }
522 break;
523 case TIME_DEL:
524 - timekeeping_leap_insert(1);
525 - time_tai--;
526 - time_state = TIME_WAIT;
527 - printk(KERN_NOTICE
528 - "Clock: deleting leap second 23:59:59 UTC\n");
529 + if ((secs + 1) % 86400 == 0) {
530 + leap = 1;
531 + time_tai--;
532 + time_state = TIME_WAIT;
533 + printk(KERN_NOTICE
534 + "Clock: deleting leap second 23:59:59 UTC\n");
535 + }
536 break;
537 case TIME_OOP:
538 - time_tai++;
539 time_state = TIME_WAIT;
540 - /* fall through */
541 + break;
542 +
543 case TIME_WAIT:
544 if (!(time_status & (STA_INS | STA_DEL)))
545 time_state = TIME_OK;
546 break;
547 }
548
549 - write_sequnlock(&xtime_lock);
550 -
551 - return res;
552 -}
553 -
554 -/*
555 - * this routine handles the overflow of the microsecond field
556 - *
557 - * The tricky bits of code to handle the accurate clock support
558 - * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
559 - * They were originally developed for SUN and DEC kernels.
560 - * All the kudos should go to Dave for this stuff.
561 - */
562 -void second_overflow(void)
563 -{
564 - s64 delta;
565
566 /* Bump the maxerror field */
567 time_maxerror += MAXFREQ / NSEC_PER_USEC;
568 @@ -423,23 +421,25 @@ void second_overflow(void)
569 pps_dec_valid();
570
571 if (!time_adjust)
572 - return;
573 + goto out;
574
575 if (time_adjust > MAX_TICKADJ) {
576 time_adjust -= MAX_TICKADJ;
577 tick_length += MAX_TICKADJ_SCALED;
578 - return;
579 + goto out;
580 }
581
582 if (time_adjust < -MAX_TICKADJ) {
583 time_adjust += MAX_TICKADJ;
584 tick_length -= MAX_TICKADJ_SCALED;
585 - return;
586 + goto out;
587 }
588
589 tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
590 << NTP_SCALE_SHIFT;
591 time_adjust = 0;
592 +out:
593 + return leap;
594 }
595
596 #ifdef CONFIG_GENERIC_CMOS_UPDATE
597 @@ -501,27 +501,6 @@ static void notify_cmos_timer(void)
598 static inline void notify_cmos_timer(void) { }
599 #endif
600
601 -/*
602 - * Start the leap seconds timer:
603 - */
604 -static inline void ntp_start_leap_timer(struct timespec *ts)
605 -{
606 - long now = ts->tv_sec;
607 -
608 - if (time_status & STA_INS) {
609 - time_state = TIME_INS;
610 - now += 86400 - now % 86400;
611 - hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
612 -
613 - return;
614 - }
615 -
616 - if (time_status & STA_DEL) {
617 - time_state = TIME_DEL;
618 - now += 86400 - (now + 1) % 86400;
619 - hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
620 - }
621 -}
622
623 /*
624 * Propagate a new txc->status value into the NTP state:
625 @@ -546,22 +525,6 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
626 time_status &= STA_RONLY;
627 time_status |= txc->status & ~STA_RONLY;
628
629 - switch (time_state) {
630 - case TIME_OK:
631 - ntp_start_leap_timer(ts);
632 - break;
633 - case TIME_INS:
634 - case TIME_DEL:
635 - time_state = TIME_OK;
636 - ntp_start_leap_timer(ts);
637 - case TIME_WAIT:
638 - if (!(time_status & (STA_INS | STA_DEL)))
639 - time_state = TIME_OK;
640 - break;
641 - case TIME_OOP:
642 - hrtimer_restart(&leap_timer);
643 - break;
644 - }
645 }
646 /*
647 * Called with the xtime lock held, so we can access and modify
648 @@ -643,9 +606,6 @@ int do_adjtimex(struct timex *txc)
649 (txc->tick < 900000/USER_HZ ||
650 txc->tick > 1100000/USER_HZ))
651 return -EINVAL;
652 -
653 - if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
654 - hrtimer_cancel(&leap_timer);
655 }
656
657 if (txc->modes & ADJ_SETOFFSET) {
658 @@ -967,6 +927,4 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup);
659 void __init ntp_init(void)
660 {
661 ntp_clear();
662 - hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
663 - leap_timer.function = ntp_leap_second;
664 }
665 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
666 index 5f45831..678ae31 100644
667 --- a/kernel/time/timekeeping.c
668 +++ b/kernel/time/timekeeping.c
669 @@ -161,23 +161,43 @@ static struct timespec xtime __attribute__ ((aligned (16)));
670 static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
671 static struct timespec total_sleep_time;
672
673 +/* Offset clock monotonic -> clock realtime */
674 +static ktime_t offs_real;
675 +
676 +/* Offset clock monotonic -> clock boottime */
677 +static ktime_t offs_boot;
678 +
679 /*
680 * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
681 */
682 static struct timespec raw_time;
683
684 -/* flag for if timekeeping is suspended */
685 -int __read_mostly timekeeping_suspended;
686 +/* must hold write on xtime_lock */
687 +static void update_rt_offset(void)
688 +{
689 + struct timespec tmp, *wtm = &wall_to_monotonic;
690
691 -/* must hold xtime_lock */
692 -void timekeeping_leap_insert(int leapsecond)
693 + set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
694 + offs_real = timespec_to_ktime(tmp);
695 +}
696 +
697 +/* must hold write on xtime_lock */
698 +static void timekeeping_update(bool clearntp)
699 {
700 - xtime.tv_sec += leapsecond;
701 - wall_to_monotonic.tv_sec -= leapsecond;
702 - update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
703 - timekeeper.mult);
704 + if (clearntp) {
705 + timekeeper.ntp_error = 0;
706 + ntp_clear();
707 + }
708 + update_rt_offset();
709 + update_vsyscall(&xtime, &wall_to_monotonic,
710 + timekeeper.clock, timekeeper.mult);
711 }
712
713 +
714 +
715 +/* flag for if timekeeping is suspended */
716 +int __read_mostly timekeeping_suspended;
717 +
718 /**
719 * timekeeping_forward_now - update clock to the current time
720 *
721 @@ -375,11 +395,7 @@ int do_settimeofday(const struct timespec *tv)
722
723 xtime = *tv;
724
725 - timekeeper.ntp_error = 0;
726 - ntp_clear();
727 -
728 - update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
729 - timekeeper.mult);
730 + timekeeping_update(true);
731
732 write_sequnlock_irqrestore(&xtime_lock, flags);
733
734 @@ -412,11 +428,7 @@ int timekeeping_inject_offset(struct timespec *ts)
735 xtime = timespec_add(xtime, *ts);
736 wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);
737
738 - timekeeper.ntp_error = 0;
739 - ntp_clear();
740 -
741 - update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
742 - timekeeper.mult);
743 + timekeeping_update(true);
744
745 write_sequnlock_irqrestore(&xtime_lock, flags);
746
747 @@ -591,6 +603,7 @@ void __init timekeeping_init(void)
748 }
749 set_normalized_timespec(&wall_to_monotonic,
750 -boot.tv_sec, -boot.tv_nsec);
751 + update_rt_offset();
752 total_sleep_time.tv_sec = 0;
753 total_sleep_time.tv_nsec = 0;
754 write_sequnlock_irqrestore(&xtime_lock, flags);
755 @@ -599,6 +612,12 @@ void __init timekeeping_init(void)
756 /* time in seconds when suspend began */
757 static struct timespec timekeeping_suspend_time;
758
759 +static void update_sleep_time(struct timespec t)
760 +{
761 + total_sleep_time = t;
762 + offs_boot = timespec_to_ktime(t);
763 +}
764 +
765 /**
766 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
767 * @delta: pointer to a timespec delta value
768 @@ -610,7 +629,7 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
769 {
770 xtime = timespec_add(xtime, *delta);
771 wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta);
772 - total_sleep_time = timespec_add(total_sleep_time, *delta);
773 + update_sleep_time(timespec_add(total_sleep_time, *delta));
774 }
775
776
777 @@ -639,10 +658,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
778
779 __timekeeping_inject_sleeptime(delta);
780
781 - timekeeper.ntp_error = 0;
782 - ntp_clear();
783 - update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
784 - timekeeper.mult);
785 + timekeeping_update(true);
786
787 write_sequnlock_irqrestore(&xtime_lock, flags);
788
789 @@ -677,6 +693,7 @@ static void timekeeping_resume(void)
790 timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
791 timekeeper.ntp_error = 0;
792 timekeeping_suspended = 0;
793 + timekeeping_update(false);
794 write_sequnlock_irqrestore(&xtime_lock, flags);
795
796 touch_softlockup_watchdog();
797 @@ -828,9 +845,14 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
798
799 timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
800 while (timekeeper.xtime_nsec >= nsecps) {
801 + int leap;
802 timekeeper.xtime_nsec -= nsecps;
803 xtime.tv_sec++;
804 - second_overflow();
805 + leap = second_overflow(xtime.tv_sec);
806 + xtime.tv_sec += leap;
807 + wall_to_monotonic.tv_sec -= leap;
808 + if (leap)
809 + clock_was_set_delayed();
810 }
811
812 /* Accumulate raw time */
813 @@ -936,14 +958,17 @@ static void update_wall_time(void)
814 * xtime.tv_nsec isn't larger then NSEC_PER_SEC
815 */
816 if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
817 + int leap;
818 xtime.tv_nsec -= NSEC_PER_SEC;
819 xtime.tv_sec++;
820 - second_overflow();
821 + leap = second_overflow(xtime.tv_sec);
822 + xtime.tv_sec += leap;
823 + wall_to_monotonic.tv_sec -= leap;
824 + if (leap)
825 + clock_was_set_delayed();
826 }
827
828 - /* check to see if there is a new clocksource to use */
829 - update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
830 - timekeeper.mult);
831 + timekeeping_update(false);
832 }
833
834 /**
835 @@ -1102,6 +1127,40 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
836 } while (read_seqretry(&xtime_lock, seq));
837 }
838
839 +#ifdef CONFIG_HIGH_RES_TIMERS
840 +/**
841 + * ktime_get_update_offsets - hrtimer helper
842 + * @real: pointer to storage for monotonic -> realtime offset
843 + * @_boot: pointer to storage for monotonic -> boottime offset
844 + *
845 + * Returns current monotonic time and updates the offsets
846 + * Called from hrtimer_interupt() or retrigger_next_event()
847 + */
848 +ktime_t ktime_get_update_offsets(ktime_t *real, ktime_t *boot)
849 +{
850 + ktime_t now;
851 + unsigned int seq;
852 + u64 secs, nsecs;
853 +
854 + do {
855 + seq = read_seqbegin(&xtime_lock);
856 +
857 + secs = xtime.tv_sec;
858 + nsecs = xtime.tv_nsec;
859 + nsecs += timekeeping_get_ns();
860 + /* If arch requires, add in gettimeoffset() */
861 + nsecs += arch_gettimeoffset();
862 +
863 + *real = offs_real;
864 + *boot = offs_boot;
865 + } while (read_seqretry(&xtime_lock, seq));
866 +
867 + now = ktime_add_ns(ktime_set(secs, 0), nsecs);
868 + now = ktime_sub(now, *real);
869 + return now;
870 +}
871 +#endif
872 +
873 /**
874 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
875 */
876 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
877 index 7410a8c..6e33b79c 100644
878 --- a/net/ipv4/tcp_input.c
879 +++ b/net/ipv4/tcp_input.c
880 @@ -5761,6 +5761,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
881 goto discard;
882
883 if (th->syn) {
884 + if (th->fin)
885 + goto discard;
886 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
887 return 1;
888
889 diff --git a/net/wireless/util.c b/net/wireless/util.c
890 index 30f68dc..bbcb58e 100644
891 --- a/net/wireless/util.c
892 +++ b/net/wireless/util.c
893 @@ -807,7 +807,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
894 ntype == NL80211_IFTYPE_P2P_CLIENT))
895 return -EBUSY;
896
897 - if (ntype != otype) {
898 + if (ntype != otype && netif_running(dev)) {
899 err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
900 ntype);
901 if (err)

  ViewVC Help
Powered by ViewVC 1.1.20