/[linux-patches]/genpatches-2.6/trunk/2.6.30/1001_linux-2.6.30.2.patch
Gentoo

Contents of /genpatches-2.6/trunk/2.6.30/1001_linux-2.6.30.2.patch

Parent Directory | Revision Log


Revision 1589 - (show annotations) (download)
Mon Jul 20 20:03:14 2009 UTC (9 years, 4 months ago) by mpagano
File size: 24027 byte(s)
Linux patch 2.6.30.2 and removal of redundant patches
1 diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h
2 index 06c5c7a..b663f1f 100644
3 --- a/arch/alpha/include/asm/percpu.h
4 +++ b/arch/alpha/include/asm/percpu.h
5 @@ -30,7 +30,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
6
7 #ifndef MODULE
8 #define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset))
9 -#define PER_CPU_ATTRIBUTES
10 +#define PER_CPU_DEF_ATTRIBUTES
11 #else
12 /*
13 * To calculate addresses of locally defined variables, GCC uses 32-bit
14 @@ -49,7 +49,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
15 : "=&r"(__ptr), "=&r"(tmp_gp)); \
16 (typeof(&per_cpu_var(var)))(__ptr + (offset)); })
17
18 -#define PER_CPU_ATTRIBUTES __used
19 +#define PER_CPU_DEF_ATTRIBUTES __used
20
21 #endif /* MODULE */
22
23 @@ -71,7 +71,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
24 #define __get_cpu_var(var) per_cpu_var(var)
25 #define __raw_get_cpu_var(var) per_cpu_var(var)
26
27 -#define PER_CPU_ATTRIBUTES
28 +#define PER_CPU_DEF_ATTRIBUTES
29
30 #endif /* SMP */
31
32 diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
33 index 401bd32..6ab0532 100644
34 --- a/arch/blackfin/kernel/irqchip.c
35 +++ b/arch/blackfin/kernel/irqchip.c
36 @@ -38,14 +38,6 @@
37 #include <asm/pda.h>
38
39 static atomic_t irq_err_count;
40 -static spinlock_t irq_controller_lock;
41 -
42 -/*
43 - * Dummy mask/unmask handler
44 - */
45 -void dummy_mask_unmask_irq(unsigned int irq)
46 -{
47 -}
48
49 void ack_bad_irq(unsigned int irq)
50 {
51 @@ -53,21 +45,9 @@ void ack_bad_irq(unsigned int irq)
52 printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
53 }
54
55 -static struct irq_chip bad_chip = {
56 - .ack = dummy_mask_unmask_irq,
57 - .mask = dummy_mask_unmask_irq,
58 - .unmask = dummy_mask_unmask_irq,
59 -};
60 -
61 static struct irq_desc bad_irq_desc = {
62 - .status = IRQ_DISABLED,
63 - .chip = &bad_chip,
64 .handle_irq = handle_bad_irq,
65 - .depth = 1,
66 .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
67 -#ifdef CONFIG_SMP
68 - .affinity = CPU_MASK_ALL
69 -#endif
70 };
71
72 #ifdef CONFIG_CPUMASK_OFFSTACK
73 @@ -117,21 +97,13 @@ __attribute__((l1_text))
74 #endif
75 asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
76 {
77 - struct pt_regs *old_regs;
78 - struct irq_desc *desc = irq_desc + irq;
79 #ifndef CONFIG_IPIPE
80 unsigned short pending, other_ints;
81 #endif
82 - old_regs = set_irq_regs(regs);
83 -
84 - /*
85 - * Some hardware gives randomly wrong interrupts. Rather
86 - * than crashing, do something sensible.
87 - */
88 - if (irq >= NR_IRQS)
89 - desc = &bad_irq_desc;
90 + struct pt_regs *old_regs = set_irq_regs(regs);
91
92 irq_enter();
93 +
94 #ifdef CONFIG_DEBUG_STACKOVERFLOW
95 /* Debugging check for stack overflow: is there less than STACK_WARN free? */
96 {
97 @@ -147,7 +119,15 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
98 }
99 }
100 #endif
101 - generic_handle_irq(irq);
102 +
103 + /*
104 + * Some hardware gives randomly wrong interrupts. Rather
105 + * than crashing, do something sensible.
106 + */
107 + if (irq >= NR_IRQS)
108 + handle_bad_irq(irq, &bad_irq_desc);
109 + else
110 + generic_handle_irq(irq);
111
112 #ifndef CONFIG_IPIPE
113 /*
114 @@ -171,14 +151,6 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
115
116 void __init init_IRQ(void)
117 {
118 - struct irq_desc *desc;
119 - int irq;
120 -
121 - spin_lock_init(&irq_controller_lock);
122 - for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
123 - *desc = bad_irq_desc;
124 - }
125 -
126 init_arch_irq();
127
128 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
129 diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
130 index a58687b..b550bae 100644
131 --- a/arch/blackfin/kernel/setup.c
132 +++ b/arch/blackfin/kernel/setup.c
133 @@ -831,7 +831,8 @@ void __init setup_arch(char **cmdline_p)
134 defined(CONFIG_BF538) || defined(CONFIG_BF539)
135 _bfin_swrst = bfin_read_SWRST();
136 #else
137 - _bfin_swrst = bfin_read_SYSCR();
138 + /* Clear boot mode field */
139 + _bfin_swrst = bfin_read_SYSCR() & ~0xf;
140 #endif
141
142 #ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
143 diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
144 index 698d4c0..7e5143c 100644
145 --- a/arch/blackfin/mach-common/head.S
146 +++ b/arch/blackfin/mach-common/head.S
147 @@ -126,25 +126,25 @@ ENTRY(__start)
148 * below
149 */
150 GET_PDA(p0, r0);
151 - r7 = [p0 + PDA_RETX];
152 + r6 = [p0 + PDA_RETX];
153 p1.l = _init_saved_retx;
154 p1.h = _init_saved_retx;
155 - [p1] = r7;
156 + [p1] = r6;
157
158 - r7 = [p0 + PDA_DCPLB];
159 + r6 = [p0 + PDA_DCPLB];
160 p1.l = _init_saved_dcplb_fault_addr;
161 p1.h = _init_saved_dcplb_fault_addr;
162 - [p1] = r7;
163 + [p1] = r6;
164
165 - r7 = [p0 + PDA_ICPLB];
166 + r6 = [p0 + PDA_ICPLB];
167 p1.l = _init_saved_icplb_fault_addr;
168 p1.h = _init_saved_icplb_fault_addr;
169 - [p1] = r7;
170 + [p1] = r6;
171
172 - r7 = [p0 + PDA_SEQSTAT];
173 + r6 = [p0 + PDA_SEQSTAT];
174 p1.l = _init_saved_seqstat;
175 p1.h = _init_saved_seqstat;
176 - [p1] = r7;
177 + [p1] = r6;
178 #endif
179
180 /* Initialize stack pointer */
181 diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
182 index 93eab61..66fb780 100644
183 --- a/arch/blackfin/mach-common/smp.c
184 +++ b/arch/blackfin/mach-common/smp.c
185 @@ -139,7 +139,7 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
186
187 static irqreturn_t ipi_handler(int irq, void *dev_instance)
188 {
189 - struct ipi_message *msg, *mg;
190 + struct ipi_message *msg;
191 struct ipi_message_queue *msg_queue;
192 unsigned int cpu = smp_processor_id();
193
194 @@ -149,7 +149,8 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance)
195 msg_queue->count++;
196
197 spin_lock(&msg_queue->lock);
198 - list_for_each_entry_safe(msg, mg, &msg_queue->head, list) {
199 + while (!list_empty(&msg_queue->head)) {
200 + msg = list_entry(msg_queue->head.next, typeof(*msg), list);
201 list_del(&msg->list);
202 switch (msg->type) {
203 case BFIN_IPI_RESCHEDULE:
204 @@ -216,7 +217,7 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
205 for_each_cpu_mask(cpu, callmap) {
206 msg_queue = &per_cpu(ipi_msg_queue, cpu);
207 spin_lock_irqsave(&msg_queue->lock, flags);
208 - list_add(&msg->list, &msg_queue->head);
209 + list_add_tail(&msg->list, &msg_queue->head);
210 spin_unlock_irqrestore(&msg_queue->lock, flags);
211 platform_send_ipi_cpu(cpu);
212 }
213 @@ -256,7 +257,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
214
215 msg_queue = &per_cpu(ipi_msg_queue, cpu);
216 spin_lock_irqsave(&msg_queue->lock, flags);
217 - list_add(&msg->list, &msg_queue->head);
218 + list_add_tail(&msg->list, &msg_queue->head);
219 spin_unlock_irqrestore(&msg_queue->lock, flags);
220 platform_send_ipi_cpu(cpu);
221
222 @@ -287,7 +288,7 @@ void smp_send_reschedule(int cpu)
223
224 msg_queue = &per_cpu(ipi_msg_queue, cpu);
225 spin_lock_irqsave(&msg_queue->lock, flags);
226 - list_add(&msg->list, &msg_queue->head);
227 + list_add_tail(&msg->list, &msg_queue->head);
228 spin_unlock_irqrestore(&msg_queue->lock, flags);
229 platform_send_ipi_cpu(cpu);
230
231 @@ -315,7 +316,7 @@ void smp_send_stop(void)
232 for_each_cpu_mask(cpu, callmap) {
233 msg_queue = &per_cpu(ipi_msg_queue, cpu);
234 spin_lock_irqsave(&msg_queue->lock, flags);
235 - list_add(&msg->list, &msg_queue->head);
236 + list_add_tail(&msg->list, &msg_queue->head);
237 spin_unlock_irqrestore(&msg_queue->lock, flags);
238 platform_send_ipi_cpu(cpu);
239 }
240 diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
241 index b51a1e8..abbc09b 100644
242 --- a/arch/x86/include/asm/pci.h
243 +++ b/arch/x86/include/asm/pci.h
244 @@ -91,7 +91,7 @@ extern void pci_iommu_alloc(void);
245
246 #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
247
248 -#if defined(CONFIG_X86_64) || defined(CONFIG_DMA_API_DEBUG)
249 +#if defined(CONFIG_X86_64) || defined(CONFIG_DMAR) || defined(CONFIG_DMA_API_DEBUG)
250
251 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
252 dma_addr_t ADDR_NAME;
253 diff --git a/block/blk-core.c b/block/blk-core.c
254 index c89883b..a59f180 100644
255 --- a/block/blk-core.c
256 +++ b/block/blk-core.c
257 @@ -1158,6 +1158,11 @@ static int __make_request(struct request_queue *q, struct bio *bio)
258
259 nr_sectors = bio_sectors(bio);
260
261 + if (bio_barrier(bio) && bio_has_data(bio) &&
262 + (q->next_ordered == QUEUE_ORDERED_NONE)) {
263 + bio_endio(bio, -EOPNOTSUPP);
264 + return 0;
265 + }
266 /*
267 * low level driver can indicate that it wants pages above a
268 * certain limit bounced to low memory (ie for highmem, or even
269 @@ -1461,11 +1466,6 @@ static inline void __generic_make_request(struct bio *bio)
270 err = -EOPNOTSUPP;
271 goto end_io;
272 }
273 - if (bio_barrier(bio) && bio_has_data(bio) &&
274 - (q->next_ordered == QUEUE_ORDERED_NONE)) {
275 - err = -EOPNOTSUPP;
276 - goto end_io;
277 - }
278
279 ret = q->make_request_fn(q, bio);
280 } while (ret);
281 diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
282 index 1300df6..39e1b58 100644
283 --- a/drivers/block/floppy.c
284 +++ b/drivers/block/floppy.c
285 @@ -3327,7 +3327,10 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
286 if (!capable(CAP_SYS_ADMIN))
287 return -EPERM;
288 mutex_lock(&open_lock);
289 - LOCK_FDC(drive, 1);
290 + if (lock_fdc(drive, 1)) {
291 + mutex_unlock(&open_lock);
292 + return -EINTR;
293 + }
294 floppy_type[type] = *g;
295 floppy_type[type].name = "user format";
296 for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
297 diff --git a/drivers/md/md.c b/drivers/md/md.c
298 index 641b211..eb1b73f 100644
299 --- a/drivers/md/md.c
300 +++ b/drivers/md/md.c
301 @@ -3589,7 +3589,8 @@ suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
302 char *e;
303 unsigned long long new = simple_strtoull(buf, &e, 10);
304
305 - if (mddev->pers->quiesce == NULL)
306 + if (mddev->pers == NULL ||
307 + mddev->pers->quiesce == NULL)
308 return -EINVAL;
309 if (buf == e || (*e && *e != '\n'))
310 return -EINVAL;
311 @@ -3617,7 +3618,8 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
312 char *e;
313 unsigned long long new = simple_strtoull(buf, &e, 10);
314
315 - if (mddev->pers->quiesce == NULL)
316 + if (mddev->pers == NULL ||
317 + mddev->pers->quiesce == NULL)
318 return -EINVAL;
319 if (buf == e || (*e && *e != '\n'))
320 return -EINVAL;
321 @@ -3876,6 +3878,8 @@ static int md_alloc(dev_t dev, char *name)
322 if (mddev2->gendisk &&
323 strcmp(mddev2->gendisk->disk_name, name) == 0) {
324 spin_unlock(&all_mddevs_lock);
325 + mutex_unlock(&disks_mutex);
326 + mddev_put(mddev);
327 return -EEXIST;
328 }
329 spin_unlock(&all_mddevs_lock);
330 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
331 index c0434e0..1f98ea4 100644
332 --- a/drivers/md/raid5.c
333 +++ b/drivers/md/raid5.c
334 @@ -3703,7 +3703,8 @@ static int make_request(struct request_queue *q, struct bio * bi)
335 /* FIXME what if we get a false positive because these
336 * are being updated.
337 */
338 - if (logical_sector >= mddev->suspend_lo &&
339 + if (bio_data_dir(bi) == WRITE &&
340 + logical_sector >= mddev->suspend_lo &&
341 logical_sector < mddev->suspend_hi) {
342 release_stripe(sh);
343 schedule();
344 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
345 index 1be6a6b..8289292 100644
346 --- a/drivers/net/tun.c
347 +++ b/drivers/net/tun.c
348 @@ -486,12 +486,14 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
349 {
350 struct tun_file *tfile = file->private_data;
351 struct tun_struct *tun = __tun_get(tfile);
352 - struct sock *sk = tun->sk;
353 + struct sock *sk;
354 unsigned int mask = 0;
355
356 if (!tun)
357 return POLLERR;
358
359 + sk = tun->sk;
360 +
361 DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
362
363 poll_wait(file, &tun->socket.wait, wait);
364 diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
365 index 2287116..46dd440 100644
366 --- a/drivers/pci/iova.c
367 +++ b/drivers/pci/iova.c
368 @@ -1,9 +1,19 @@
369 /*
370 - * Copyright (c) 2006, Intel Corporation.
371 + * Copyright © 2006-2009, Intel Corporation.
372 *
373 - * This file is released under the GPLv2.
374 + * This program is free software; you can redistribute it and/or modify it
375 + * under the terms and conditions of the GNU General Public License,
376 + * version 2, as published by the Free Software Foundation.
377 + *
378 + * This program is distributed in the hope it will be useful, but WITHOUT
379 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
380 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
381 + * more details.
382 + *
383 + * You should have received a copy of the GNU General Public License along with
384 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
385 + * Place - Suite 330, Boston, MA 02111-1307 USA.
386 *
387 - * Copyright (C) 2006-2008 Intel Corporation
388 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
389 */
390
391 @@ -123,7 +133,15 @@ move_left:
392 /* Insert the new_iova into domain rbtree by holding writer lock */
393 /* Add new node and rebalance tree. */
394 {
395 - struct rb_node **entry = &((prev)), *parent = NULL;
396 + struct rb_node **entry, *parent = NULL;
397 +
398 + /* If we have 'prev', it's a valid place to start the
399 + insertion. Otherwise, start from the root. */
400 + if (prev)
401 + entry = &prev;
402 + else
403 + entry = &iovad->rbroot.rb_node;
404 +
405 /* Figure out where to put new node */
406 while (*entry) {
407 struct iova *this = container_of(*entry,
408 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
409 index ba76b68..eb40335 100644
410 --- a/fs/fuse/dev.c
411 +++ b/fs/fuse/dev.c
412 @@ -904,7 +904,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
413 unsigned long nr_segs, loff_t pos)
414 {
415 int err;
416 - unsigned nbytes = iov_length(iov, nr_segs);
417 + size_t nbytes = iov_length(iov, nr_segs);
418 struct fuse_req *req;
419 struct fuse_out_header oh;
420 struct fuse_copy_state cs;
421 diff --git a/fs/fuse/file.c b/fs/fuse/file.c
422 index 06f30e9..053ff1c 100644
423 --- a/fs/fuse/file.c
424 +++ b/fs/fuse/file.c
425 @@ -1867,7 +1867,7 @@ static unsigned fuse_file_poll(struct file *file, poll_table *wait)
426
427 req = fuse_get_req(fc);
428 if (IS_ERR(req))
429 - return PTR_ERR(req);
430 + return POLLERR;
431
432 req->in.h.opcode = FUSE_POLL;
433 req->in.h.nodeid = get_node_id(inode);
434 diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
435 index d7d50d7..aa00800 100644
436 --- a/include/asm-generic/percpu.h
437 +++ b/include/asm-generic/percpu.h
438 @@ -97,4 +97,8 @@ extern void setup_per_cpu_areas(void);
439 #define PER_CPU_ATTRIBUTES
440 #endif
441
442 +#ifndef PER_CPU_DEF_ATTRIBUTES
443 +#define PER_CPU_DEF_ATTRIBUTES
444 +#endif
445 +
446 #endif /* _ASM_GENERIC_PERCPU_H_ */
447 diff --git a/include/linux/mm.h b/include/linux/mm.h
448 index bff1f0d..0c21af6 100644
449 --- a/include/linux/mm.h
450 +++ b/include/linux/mm.h
451 @@ -580,12 +580,10 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
452 */
453 static inline unsigned long round_hint_to_min(unsigned long hint)
454 {
455 -#ifdef CONFIG_SECURITY
456 hint &= PAGE_MASK;
457 if (((void *)hint != NULL) &&
458 (hint < mmap_min_addr))
459 return PAGE_ALIGN(mmap_min_addr);
460 -#endif
461 return hint;
462 }
463
464 diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
465 index 8f921d7..68438e1 100644
466 --- a/include/linux/percpu-defs.h
467 +++ b/include/linux/percpu-defs.h
468 @@ -24,7 +24,8 @@
469
470 #define DEFINE_PER_CPU_SECTION(type, name, section) \
471 __attribute__((__section__(PER_CPU_BASE_SECTION section))) \
472 - PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
473 + PER_CPU_ATTRIBUTES PER_CPU_DEF_ATTRIBUTES \
474 + __typeof__(type) per_cpu__##name
475
476 /*
477 * Variant on the per-CPU variable declaration/definition theme used for
478 diff --git a/include/linux/personality.h b/include/linux/personality.h
479 index a84e9ff..1261208 100644
480 --- a/include/linux/personality.h
481 +++ b/include/linux/personality.h
482 @@ -40,7 +40,10 @@ enum {
483 * Security-relevant compatibility flags that must be
484 * cleared upon setuid or setgid exec:
485 */
486 -#define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC|ADDR_NO_RANDOMIZE)
487 +#define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
488 + ADDR_NO_RANDOMIZE | \
489 + ADDR_COMPAT_LAYOUT | \
490 + MMAP_PAGE_ZERO)
491
492 /*
493 * Personality types.
494 diff --git a/include/linux/security.h b/include/linux/security.h
495 index d5fd616..5eff459 100644
496 --- a/include/linux/security.h
497 +++ b/include/linux/security.h
498 @@ -2197,6 +2197,8 @@ static inline int security_file_mmap(struct file *file, unsigned long reqprot,
499 unsigned long addr,
500 unsigned long addr_only)
501 {
502 + if ((addr < mmap_min_addr) && !capable(CAP_SYS_RAWIO))
503 + return -EACCES;
504 return 0;
505 }
506
507 diff --git a/kernel/futex.c b/kernel/futex.c
508 index d546b2d..4d973bd 100644
509 --- a/kernel/futex.c
510 +++ b/kernel/futex.c
511 @@ -241,6 +241,7 @@ again:
512 if (err < 0)
513 return err;
514
515 + page = compound_head(page);
516 lock_page(page);
517 if (!page->mapping) {
518 unlock_page(page);
519 @@ -278,6 +279,25 @@ void put_futex_key(int fshared, union futex_key *key)
520 drop_futex_key_refs(key);
521 }
522
523 +/*
524 + * fault_in_user_writeable - fault in user address and verify RW access
525 + * @uaddr: pointer to faulting user space address
526 + *
527 + * Slow path to fixup the fault we just took in the atomic write
528 + * access to @uaddr.
529 + *
530 + * We have no generic implementation of a non destructive write to the
531 + * user address. We know that we faulted in the atomic pagefault
532 + * disabled section so we can as well avoid the #PF overhead by
533 + * calling get_user_pages() right away.
534 + */
535 +static int fault_in_user_writeable(u32 __user *uaddr)
536 +{
537 + int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
538 + 1, 1, 0, NULL, NULL);
539 + return ret < 0 ? ret : 0;
540 +}
541 +
542 static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
543 {
544 u32 curval;
545 @@ -739,7 +759,6 @@ retry:
546 retry_private:
547 op_ret = futex_atomic_op_inuser(op, uaddr2);
548 if (unlikely(op_ret < 0)) {
549 - u32 dummy;
550
551 double_unlock_hb(hb1, hb2);
552
553 @@ -757,7 +776,7 @@ retry_private:
554 goto out_put_keys;
555 }
556
557 - ret = get_user(dummy, uaddr2);
558 + ret = fault_in_user_writeable(uaddr2);
559 if (ret)
560 goto out_put_keys;
561
562 @@ -1097,7 +1116,7 @@ retry:
563 handle_fault:
564 spin_unlock(q->lock_ptr);
565
566 - ret = get_user(uval, uaddr);
567 + ret = fault_in_user_writeable(uaddr);
568
569 spin_lock(q->lock_ptr);
570
571 @@ -1552,16 +1571,9 @@ out:
572 return ret;
573
574 uaddr_faulted:
575 - /*
576 - * We have to r/w *(int __user *)uaddr, and we have to modify it
577 - * atomically. Therefore, if we continue to fault after get_user()
578 - * below, we need to handle the fault ourselves, while still holding
579 - * the mmap_sem. This can occur if the uaddr is under contention as
580 - * we have to drop the mmap_sem in order to call get_user().
581 - */
582 queue_unlock(&q, hb);
583
584 - ret = get_user(uval, uaddr);
585 + ret = fault_in_user_writeable(uaddr);
586 if (ret)
587 goto out_put_key;
588
589 @@ -1657,17 +1669,10 @@ out:
590 return ret;
591
592 pi_faulted:
593 - /*
594 - * We have to r/w *(int __user *)uaddr, and we have to modify it
595 - * atomically. Therefore, if we continue to fault after get_user()
596 - * below, we need to handle the fault ourselves, while still holding
597 - * the mmap_sem. This can occur if the uaddr is under contention as
598 - * we have to drop the mmap_sem in order to call get_user().
599 - */
600 spin_unlock(&hb->lock);
601 put_futex_key(fshared, &key);
602
603 - ret = get_user(uval, uaddr);
604 + ret = fault_in_user_writeable(uaddr);
605 if (!ret)
606 goto retry;
607
608 diff --git a/kernel/resource.c b/kernel/resource.c
609 index ac5f3a3..78b0872 100644
610 --- a/kernel/resource.c
611 +++ b/kernel/resource.c
612 @@ -787,7 +787,7 @@ static int __init reserve_setup(char *str)
613 static struct resource reserve[MAXRESERVE];
614
615 for (;;) {
616 - int io_start, io_num;
617 + unsigned int io_start, io_num;
618 int x = reserved;
619
620 if (get_option (&str, &io_start) != 2)
621 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
622 index b2970d5..e5bfcc7 100644
623 --- a/kernel/sysctl.c
624 +++ b/kernel/sysctl.c
625 @@ -1225,7 +1225,6 @@ static struct ctl_table vm_table[] = {
626 .strategy = &sysctl_jiffies,
627 },
628 #endif
629 -#ifdef CONFIG_SECURITY
630 {
631 .ctl_name = CTL_UNNUMBERED,
632 .procname = "mmap_min_addr",
633 @@ -1234,7 +1233,6 @@ static struct ctl_table vm_table[] = {
634 .mode = 0644,
635 .proc_handler = &proc_doulongvec_minmax,
636 },
637 -#endif
638 #ifdef CONFIG_NUMA
639 {
640 .ctl_name = CTL_UNNUMBERED,
641 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
642 index 2b16536..6a4e3d4 100644
643 --- a/lib/dma-debug.c
644 +++ b/lib/dma-debug.c
645 @@ -599,7 +599,7 @@ static inline bool overlap(void *addr, u64 size, void *start, void *end)
646
647 return ((addr >= start && addr < end) ||
648 (addr2 >= start && addr2 < end) ||
649 - ((addr < start) && (addr2 >= end)));
650 + ((addr < start) && (addr2 > end)));
651 }
652
653 static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
654 diff --git a/mm/Kconfig b/mm/Kconfig
655 index c2b57d8..71830ba 100644
656 --- a/mm/Kconfig
657 +++ b/mm/Kconfig
658 @@ -226,6 +226,25 @@ config HAVE_MLOCKED_PAGE_BIT
659 config MMU_NOTIFIER
660 bool
661
662 +config DEFAULT_MMAP_MIN_ADDR
663 + int "Low address space to protect from user allocation"
664 + default 4096
665 + help
666 + This is the portion of low virtual memory which should be protected
667 + from userspace allocation. Keeping a user from writing to low pages
668 + can help reduce the impact of kernel NULL pointer bugs.
669 +
670 + For most ia64, ppc64 and x86 users with lots of address space
671 + a value of 65536 is reasonable and should cause no problems.
672 + On arm and other archs it should not be higher than 32768.
673 + Programs which use vm86 functionality would either need additional
674 + permissions from either the LSM or the capabilities module or have
675 + this protection disabled.
676 +
677 + This value can be changed after boot using the
678 + /proc/sys/vm/mmap_min_addr tunable.
679 +
680 +
681 config NOMMU_INITIAL_TRIM_EXCESS
682 int "Turn on mmap() excess space trimming before booting"
683 depends on !MMU
684 diff --git a/mm/mmap.c b/mm/mmap.c
685 index 6b7b1a9..2b43fa1 100644
686 --- a/mm/mmap.c
687 +++ b/mm/mmap.c
688 @@ -87,6 +87,9 @@ int sysctl_overcommit_ratio = 50; /* default is 50% */
689 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
690 struct percpu_counter vm_committed_as;
691
692 +/* amount of vm to protect from userspace access */
693 +unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
694 +
695 /*
696 * Check that a process has enough memory to allocate a new virtual
697 * mapping. 0 means there is enough memory for the allocation to
698 diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
699 index f11931c..9c22032 100644
700 --- a/net/ipv4/arp.c
701 +++ b/net/ipv4/arp.c
702 @@ -801,11 +801,8 @@ static int arp_process(struct sk_buff *skb)
703 * cache.
704 */
705
706 - /*
707 - * Special case: IPv4 duplicate address detection packet (RFC2131)
708 - * and Gratuitous ARP/ARP Announce. (RFC3927, Section 2.4)
709 - */
710 - if (sip == 0 || tip == sip) {
711 + /* Special case: IPv4 duplicate address detection packet (RFC2131) */
712 + if (sip == 0) {
713 if (arp->ar_op == htons(ARPOP_REQUEST) &&
714 inet_addr_type(net, tip) == RTN_LOCAL &&
715 !arp_ignore(in_dev, sip, tip))
716 diff --git a/security/Kconfig b/security/Kconfig
717 index bb24477..d23c839 100644
718 --- a/security/Kconfig
719 +++ b/security/Kconfig
720 @@ -110,28 +110,8 @@ config SECURITY_ROOTPLUG
721
722 See <http://www.linuxjournal.com/article.php?sid=6279> for
723 more information about this module.
724 -
725 - If you are unsure how to answer this question, answer N.
726 -
727 -config SECURITY_DEFAULT_MMAP_MIN_ADDR
728 - int "Low address space to protect from user allocation"
729 - depends on SECURITY
730 - default 0
731 - help
732 - This is the portion of low virtual memory which should be protected
733 - from userspace allocation. Keeping a user from writing to low pages
734 - can help reduce the impact of kernel NULL pointer bugs.
735 -
736 - For most ia64, ppc64 and x86 users with lots of address space
737 - a value of 65536 is reasonable and should cause no problems.
738 - On arm and other archs it should not be higher than 32768.
739 - Programs which use vm86 functionality would either need additional
740 - permissions from either the LSM or the capabilities module or have
741 - this protection disabled.
742 -
743 - This value can be changed after boot using the
744 - /proc/sys/vm/mmap_min_addr tunable.
745
746 + If you are unsure how to answer this question, answer N.
747
748 source security/selinux/Kconfig
749 source security/smack/Kconfig
750 diff --git a/security/security.c b/security/security.c
751 index 5284255..dc7674f 100644
752 --- a/security/security.c
753 +++ b/security/security.c
754 @@ -26,9 +26,6 @@ extern void security_fixup_ops(struct security_operations *ops);
755
756 struct security_operations *security_ops; /* Initialized to NULL */
757
758 -/* amount of vm to protect from userspace access */
759 -unsigned long mmap_min_addr = CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR;
760 -
761 static inline int verify(struct security_operations *ops)
762 {
763 /* verify the security_operations structure exists */

  ViewVC Help
Powered by ViewVC 1.1.20