/[linux-patches]/genpatches-2.6/tags/2.6.38-5/1001_linux-2.6.38.2.patch
Gentoo

Contents of /genpatches-2.6/tags/2.6.38-5/1001_linux-2.6.38.2.patch

Parent Directory | Revision Log


Revision 1910 - (show annotations) (download)
Tue May 3 13:08:58 2011 UTC (3 years, 5 months ago) by mpagano
File size: 81581 byte(s)
2.6.38-5 release
1 diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
2 index f62bb4c..7c3fb07 100644
3 --- a/arch/arm/mach-s3c2440/mach-mini2440.c
4 +++ b/arch/arm/mach-s3c2440/mach-mini2440.c
5 @@ -506,6 +506,11 @@ static struct i2c_board_info mini2440_i2c_devs[] __initdata = {
6 },
7 };
8
9 +static struct platform_device uda1340_codec = {
10 + .name = "uda134x-codec",
11 + .id = -1,
12 +};
13 +
14 static struct platform_device *mini2440_devices[] __initdata = {
15 &s3c_device_ohci,
16 &s3c_device_wdt,
17 @@ -521,7 +526,9 @@ static struct platform_device *mini2440_devices[] __initdata = {
18 &s3c_device_nand,
19 &s3c_device_sdi,
20 &s3c_device_iis,
21 + &uda1340_codec,
22 &mini2440_audio,
23 + &samsung_asoc_dma,
24 };
25
26 static void __init mini2440_map_io(void)
27 diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
28 index 90a15d2..2130ca6 100644
29 --- a/arch/sh/kernel/ptrace_32.c
30 +++ b/arch/sh/kernel/ptrace_32.c
31 @@ -101,6 +101,8 @@ static int set_single_step(struct task_struct *tsk, unsigned long addr)
32
33 attr = bp->attr;
34 attr.bp_addr = addr;
35 + /* reenable breakpoint */
36 + attr.disabled = false;
37 err = modify_user_hw_breakpoint(bp, &attr);
38 if (unlikely(err))
39 return err;
40 @@ -392,6 +394,9 @@ long arch_ptrace(struct task_struct *child, long request,
41 tmp = 0;
42 } else {
43 unsigned long index;
44 + ret = init_fpu(child);
45 + if (ret)
46 + break;
47 index = addr - offsetof(struct user, fpu);
48 tmp = ((unsigned long *)child->thread.xstate)
49 [index >> 2];
50 @@ -423,6 +428,9 @@ long arch_ptrace(struct task_struct *child, long request,
51 else if (addr >= offsetof(struct user, fpu) &&
52 addr < offsetof(struct user, u_fpvalid)) {
53 unsigned long index;
54 + ret = init_fpu(child);
55 + if (ret)
56 + break;
57 index = addr - offsetof(struct user, fpu);
58 set_stopped_child_used_math(child);
59 ((unsigned long *)child->thread.xstate)
60 diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
61 index 4436eac..c8f9764 100644
62 --- a/arch/sh/kernel/ptrace_64.c
63 +++ b/arch/sh/kernel/ptrace_64.c
64 @@ -403,6 +403,9 @@ long arch_ptrace(struct task_struct *child, long request,
65 else if ((addr >= offsetof(struct user, fpu)) &&
66 (addr < offsetof(struct user, u_fpvalid))) {
67 unsigned long index;
68 + ret = init_fpu(child);
69 + if (ret)
70 + break;
71 index = addr - offsetof(struct user, fpu);
72 tmp = get_fpu_long(child, index);
73 } else if (addr == offsetof(struct user, u_fpvalid)) {
74 @@ -442,6 +445,9 @@ long arch_ptrace(struct task_struct *child, long request,
75 else if ((addr >= offsetof(struct user, fpu)) &&
76 (addr < offsetof(struct user, u_fpvalid))) {
77 unsigned long index;
78 + ret = init_fpu(child);
79 + if (ret)
80 + break;
81 index = addr - offsetof(struct user, fpu);
82 ret = put_fpu_long(child, index, data);
83 }
84 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
85 index c8b4efa..9ca3b0e 100644
86 --- a/arch/x86/kernel/entry_32.S
87 +++ b/arch/x86/kernel/entry_32.S
88 @@ -1413,7 +1413,7 @@ ENTRY(async_page_fault)
89 CFI_ADJUST_CFA_OFFSET 4
90 jmp error_code
91 CFI_ENDPROC
92 -END(apf_page_fault)
93 +END(async_page_fault)
94 #endif
95
96 /*
97 diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
98 index 2d2673c..5655c22 100644
99 --- a/arch/x86/kernel/head64.c
100 +++ b/arch/x86/kernel/head64.c
101 @@ -77,9 +77,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
102 /* Make NULL pointers segfault */
103 zap_identity_mappings();
104
105 - /* Cleanup the over mapped high alias */
106 - cleanup_highmap();
107 -
108 max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;
109
110 for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
111 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
112 index d3cfe26..e543fe9 100644
113 --- a/arch/x86/kernel/setup.c
114 +++ b/arch/x86/kernel/setup.c
115 @@ -297,6 +297,9 @@ static void __init init_gbpages(void)
116 static inline void init_gbpages(void)
117 {
118 }
119 +static void __init cleanup_highmap(void)
120 +{
121 +}
122 #endif
123
124 static void __init reserve_brk(void)
125 @@ -922,6 +925,8 @@ void __init setup_arch(char **cmdline_p)
126 */
127 reserve_brk();
128
129 + cleanup_highmap();
130 +
131 memblock.current_limit = get_max_mapped();
132 memblock_x86_fill();
133
134 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
135 index 947f42a..f13ff3a 100644
136 --- a/arch/x86/mm/init.c
137 +++ b/arch/x86/mm/init.c
138 @@ -279,25 +279,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
139 load_cr3(swapper_pg_dir);
140 #endif
141
142 -#ifdef CONFIG_X86_64
143 - if (!after_bootmem && !start) {
144 - pud_t *pud;
145 - pmd_t *pmd;
146 -
147 - mmu_cr4_features = read_cr4();
148 -
149 - /*
150 - * _brk_end cannot change anymore, but it and _end may be
151 - * located on different 2M pages. cleanup_highmap(), however,
152 - * can only consider _end when it runs, so destroy any
153 - * mappings beyond _brk_end here.
154 - */
155 - pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
156 - pmd = pmd_offset(pud, _brk_end - 1);
157 - while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
158 - pmd_clear(pmd);
159 - }
160 -#endif
161 __flush_tlb_all();
162
163 if (!after_bootmem && e820_table_end > e820_table_start)
164 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
165 index c14a542..68f9921 100644
166 --- a/arch/x86/mm/init_64.c
167 +++ b/arch/x86/mm/init_64.c
168 @@ -51,6 +51,7 @@
169 #include <asm/numa.h>
170 #include <asm/cacheflush.h>
171 #include <asm/init.h>
172 +#include <asm/setup.h>
173
174 static int __init parse_direct_gbpages_off(char *arg)
175 {
176 @@ -293,18 +294,18 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
177 * to the compile time generated pmds. This results in invalid pmds up
178 * to the point where we hit the physaddr 0 mapping.
179 *
180 - * We limit the mappings to the region from _text to _end. _end is
181 - * rounded up to the 2MB boundary. This catches the invalid pmds as
182 + * We limit the mappings to the region from _text to _brk_end. _brk_end
183 + * is rounded up to the 2MB boundary. This catches the invalid pmds as
184 * well, as they are located before _text:
185 */
186 void __init cleanup_highmap(void)
187 {
188 unsigned long vaddr = __START_KERNEL_map;
189 - unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
190 + unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
191 + unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
192 pmd_t *pmd = level2_kernel_pgt;
193 - pmd_t *last_pmd = pmd + PTRS_PER_PMD;
194
195 - for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
196 + for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
197 if (pmd_none(*pmd))
198 continue;
199 if (vaddr < (unsigned long) _text || vaddr > end)
200 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
201 index f608942..6020562 100644
202 --- a/arch/x86/xen/mmu.c
203 +++ b/arch/x86/xen/mmu.c
204 @@ -1651,9 +1651,6 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
205 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
206 pte_t pte;
207
208 - if (pfn > max_pfn_mapped)
209 - max_pfn_mapped = pfn;
210 -
211 if (!pte_none(pte_page[pteidx]))
212 continue;
213
214 @@ -1711,6 +1708,12 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
215 pud_t *l3;
216 pmd_t *l2;
217
218 + /* max_pfn_mapped is the last pfn mapped in the initial memory
219 + * mappings. Considering that on Xen after the kernel mappings we
220 + * have the mappings of some pages that don't exist in pfn space, we
221 + * set max_pfn_mapped to the last real pfn mapped. */
222 + max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
223 +
224 /* Zap identity mapping */
225 init_level4_pgt[0] = __pgd(0);
226
227 @@ -1815,9 +1818,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
228 initial_kernel_pmd =
229 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
230
231 - max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
232 - xen_start_info->nr_pt_frames * PAGE_SIZE +
233 - 512*1024);
234 + max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
235
236 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
237 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
238 diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
239 index 69ad529..ea5ac2d 100644
240 --- a/drivers/firmware/dcdbas.c
241 +++ b/drivers/firmware/dcdbas.c
242 @@ -268,8 +268,10 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
243 }
244
245 /* generate SMI */
246 + /* inb to force posted write through and make SMI happen now */
247 asm volatile (
248 - "outb %b0,%w1"
249 + "outb %b0,%w1\n"
250 + "inb %w1"
251 : /* no output args */
252 : "a" (smi_cmd->command_code),
253 "d" (smi_cmd->command_address),
254 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
255 index 654faa8..6a5371b 100644
256 --- a/drivers/gpu/drm/drm_crtc.c
257 +++ b/drivers/gpu/drm/drm_crtc.c
258 @@ -1073,6 +1073,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
259 uint32_t __user *encoder_id;
260 struct drm_mode_group *mode_group;
261
262 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
263 + return -EINVAL;
264 +
265 mutex_lock(&dev->mode_config.mutex);
266
267 /*
268 @@ -1244,6 +1247,9 @@ int drm_mode_getcrtc(struct drm_device *dev,
269 struct drm_mode_object *obj;
270 int ret = 0;
271
272 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
273 + return -EINVAL;
274 +
275 mutex_lock(&dev->mode_config.mutex);
276
277 obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
278 @@ -1312,6 +1318,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
279 uint64_t __user *prop_values;
280 uint32_t __user *encoder_ptr;
281
282 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
283 + return -EINVAL;
284 +
285 memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
286
287 DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
288 @@ -1431,6 +1440,9 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
289 struct drm_encoder *encoder;
290 int ret = 0;
291
292 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
293 + return -EINVAL;
294 +
295 mutex_lock(&dev->mode_config.mutex);
296 obj = drm_mode_object_find(dev, enc_resp->encoder_id,
297 DRM_MODE_OBJECT_ENCODER);
298 @@ -1486,6 +1498,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
299 int ret = 0;
300 int i;
301
302 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
303 + return -EINVAL;
304 +
305 mutex_lock(&dev->mode_config.mutex);
306 obj = drm_mode_object_find(dev, crtc_req->crtc_id,
307 DRM_MODE_OBJECT_CRTC);
308 @@ -1603,6 +1618,9 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
309 struct drm_crtc *crtc;
310 int ret = 0;
311
312 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
313 + return -EINVAL;
314 +
315 if (!req->flags) {
316 DRM_ERROR("no operation set\n");
317 return -EINVAL;
318 @@ -1667,6 +1685,9 @@ int drm_mode_addfb(struct drm_device *dev,
319 struct drm_framebuffer *fb;
320 int ret = 0;
321
322 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
323 + return -EINVAL;
324 +
325 if ((config->min_width > r->width) || (r->width > config->max_width)) {
326 DRM_ERROR("mode new framebuffer width not within limits\n");
327 return -EINVAL;
328 @@ -1724,6 +1745,9 @@ int drm_mode_rmfb(struct drm_device *dev,
329 int ret = 0;
330 int found = 0;
331
332 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
333 + return -EINVAL;
334 +
335 mutex_lock(&dev->mode_config.mutex);
336 obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
337 /* TODO check that we realy get a framebuffer back. */
338 @@ -1780,6 +1804,9 @@ int drm_mode_getfb(struct drm_device *dev,
339 struct drm_framebuffer *fb;
340 int ret = 0;
341
342 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
343 + return -EINVAL;
344 +
345 mutex_lock(&dev->mode_config.mutex);
346 obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
347 if (!obj) {
348 @@ -1813,6 +1840,9 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
349 int num_clips;
350 int ret = 0;
351
352 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
353 + return -EINVAL;
354 +
355 mutex_lock(&dev->mode_config.mutex);
356 obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
357 if (!obj) {
358 @@ -1996,6 +2026,9 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
359 struct drm_mode_modeinfo *umode = &mode_cmd->mode;
360 int ret = 0;
361
362 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
363 + return -EINVAL;
364 +
365 mutex_lock(&dev->mode_config.mutex);
366
367 obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
368 @@ -2042,6 +2075,9 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
369 struct drm_mode_modeinfo *umode = &mode_cmd->mode;
370 int ret = 0;
371
372 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
373 + return -EINVAL;
374 +
375 mutex_lock(&dev->mode_config.mutex);
376
377 obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
378 @@ -2211,6 +2247,9 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
379 uint64_t __user *values_ptr;
380 uint32_t __user *blob_length_ptr;
381
382 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
383 + return -EINVAL;
384 +
385 mutex_lock(&dev->mode_config.mutex);
386 obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
387 if (!obj) {
388 @@ -2333,6 +2372,9 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
389 int ret = 0;
390 void *blob_ptr;
391
392 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
393 + return -EINVAL;
394 +
395 mutex_lock(&dev->mode_config.mutex);
396 obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
397 if (!obj) {
398 @@ -2393,6 +2435,9 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
399 int ret = -EINVAL;
400 int i;
401
402 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
403 + return -EINVAL;
404 +
405 mutex_lock(&dev->mode_config.mutex);
406
407 obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
408 @@ -2509,6 +2554,9 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
409 int size;
410 int ret = 0;
411
412 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
413 + return -EINVAL;
414 +
415 mutex_lock(&dev->mode_config.mutex);
416 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
417 if (!obj) {
418 @@ -2560,6 +2608,9 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
419 int size;
420 int ret = 0;
421
422 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
423 + return -EINVAL;
424 +
425 mutex_lock(&dev->mode_config.mutex);
426 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
427 if (!obj) {
428 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
429 index ea1c4b0..c3c78ee 100644
430 --- a/drivers/gpu/drm/drm_gem.c
431 +++ b/drivers/gpu/drm/drm_gem.c
432 @@ -498,11 +498,12 @@ EXPORT_SYMBOL(drm_gem_vm_open);
433 void drm_gem_vm_close(struct vm_area_struct *vma)
434 {
435 struct drm_gem_object *obj = vma->vm_private_data;
436 + struct drm_device *dev = obj->dev;
437
438 - mutex_lock(&obj->dev->struct_mutex);
439 + mutex_lock(&dev->struct_mutex);
440 drm_vm_close_locked(vma);
441 drm_gem_object_unreference(obj);
442 - mutex_unlock(&obj->dev->struct_mutex);
443 + mutex_unlock(&dev->struct_mutex);
444 }
445 EXPORT_SYMBOL(drm_gem_vm_close);
446
447 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
448 index 36e66cc..729c95a 100644
449 --- a/drivers/gpu/drm/i915/i915_gem.c
450 +++ b/drivers/gpu/drm/i915/i915_gem.c
451 @@ -1749,8 +1749,10 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
452 return;
453
454 spin_lock(&file_priv->mm.lock);
455 - list_del(&request->client_list);
456 - request->file_priv = NULL;
457 + if (request->file_priv) {
458 + list_del(&request->client_list);
459 + request->file_priv = NULL;
460 + }
461 spin_unlock(&file_priv->mm.lock);
462 }
463
464 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
465 index 50ab161..ded73a6 100644
466 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
467 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
468 @@ -388,6 +388,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
469 uint32_t __iomem *reloc_entry;
470 void __iomem *reloc_page;
471
472 + /* We can't wait for rendering with pagefaults disabled */
473 + if (obj->active && in_atomic())
474 + return -EFAULT;
475 +
476 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
477 if (ret)
478 return ret;
479 @@ -461,15 +465,24 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
480 struct list_head *objects)
481 {
482 struct drm_i915_gem_object *obj;
483 - int ret;
484 -
485 + int ret = 0;
486 +
487 + /* This is the fast path and we cannot handle a pagefault whilst
488 + * holding the struct mutex lest the user pass in the relocations
489 + * contained within a mmaped bo. For in such a case we, the page
490 + * fault handler would call i915_gem_fault() and we would try to
491 + * acquire the struct mutex again. Obviously this is bad and so
492 + * lockdep complains vehemently.
493 + */
494 + pagefault_disable();
495 list_for_each_entry(obj, objects, exec_list) {
496 ret = i915_gem_execbuffer_relocate_object(obj, eb);
497 if (ret)
498 - return ret;
499 + break;
500 }
501 + pagefault_enable();
502
503 - return 0;
504 + return ret;
505 }
506
507 static int
508 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
509 index 4a5a73b..e967cc8 100644
510 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
511 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
512 @@ -957,7 +957,11 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
513 /* adjust pixel clock as needed */
514 adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
515
516 - if (ASIC_IS_AVIVO(rdev))
517 + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
518 + /* TV seems to prefer the legacy algo on some boards */
519 + radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
520 + &ref_div, &post_div);
521 + else if (ASIC_IS_AVIVO(rdev))
522 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
523 &ref_div, &post_div);
524 else
525 diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
526 index cf7c8d5..cf602e2 100644
527 --- a/drivers/gpu/drm/radeon/radeon_combios.c
528 +++ b/drivers/gpu/drm/radeon/radeon_combios.c
529 @@ -448,7 +448,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
530
531 bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
532 {
533 - int edid_info;
534 + int edid_info, size;
535 struct edid *edid;
536 unsigned char *raw;
537 edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
538 @@ -456,11 +456,12 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
539 return false;
540
541 raw = rdev->bios + edid_info;
542 - edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL);
543 + size = EDID_LENGTH * (raw[0x7e] + 1);
544 + edid = kmalloc(size, GFP_KERNEL);
545 if (edid == NULL)
546 return false;
547
548 - memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1));
549 + memcpy((unsigned char *)edid, raw, size);
550
551 if (!drm_edid_is_valid(edid)) {
552 kfree(edid);
553 @@ -468,6 +469,7 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
554 }
555
556 rdev->mode_info.bios_hardcoded_edid = edid;
557 + rdev->mode_info.bios_hardcoded_edid_size = size;
558 return true;
559 }
560
561 @@ -475,8 +477,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
562 struct edid *
563 radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
564 {
565 - if (rdev->mode_info.bios_hardcoded_edid)
566 - return rdev->mode_info.bios_hardcoded_edid;
567 + struct edid *edid;
568 +
569 + if (rdev->mode_info.bios_hardcoded_edid) {
570 + edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
571 + if (edid) {
572 + memcpy((unsigned char *)edid,
573 + (unsigned char *)rdev->mode_info.bios_hardcoded_edid,
574 + rdev->mode_info.bios_hardcoded_edid_size);
575 + return edid;
576 + }
577 + }
578 return NULL;
579 }
580
581 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
582 index 22b7e3d..d83338b 100644
583 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
584 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
585 @@ -629,6 +629,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector,
586 static enum drm_connector_status
587 radeon_vga_detect(struct drm_connector *connector, bool force)
588 {
589 + struct drm_device *dev = connector->dev;
590 + struct radeon_device *rdev = dev->dev_private;
591 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
592 struct drm_encoder *encoder;
593 struct drm_encoder_helper_funcs *encoder_funcs;
594 @@ -679,6 +681,17 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
595
596 if (ret == connector_status_connected)
597 ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
598 +
599 + /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
600 + * vbios to deal with KVMs. If we have one and are not able to detect a monitor
601 + * by other means, assume the CRT is connected and use that EDID.
602 + */
603 + if ((!rdev->is_atom_bios) &&
604 + (ret == connector_status_disconnected) &&
605 + rdev->mode_info.bios_hardcoded_edid_size) {
606 + ret = connector_status_connected;
607 + }
608 +
609 radeon_connector_update_scratch_regs(connector, ret);
610 return ret;
611 }
612 @@ -790,6 +803,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector)
613 static enum drm_connector_status
614 radeon_dvi_detect(struct drm_connector *connector, bool force)
615 {
616 + struct drm_device *dev = connector->dev;
617 + struct radeon_device *rdev = dev->dev_private;
618 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
619 struct drm_encoder *encoder = NULL;
620 struct drm_encoder_helper_funcs *encoder_funcs;
621 @@ -829,8 +844,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
622 * you don't really know what's connected to which port as both are digital.
623 */
624 if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
625 - struct drm_device *dev = connector->dev;
626 - struct radeon_device *rdev = dev->dev_private;
627 struct drm_connector *list_connector;
628 struct radeon_connector *list_radeon_connector;
629 list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
630 @@ -895,6 +908,19 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
631 ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
632 }
633
634 + /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
635 + * vbios to deal with KVMs. If we have one and are not able to detect a monitor
636 + * by other means, assume the DFP is connected and use that EDID. In most
637 + * cases the DVI port is actually a virtual KVM port connected to the service
638 + * processor.
639 + */
640 + if ((!rdev->is_atom_bios) &&
641 + (ret == connector_status_disconnected) &&
642 + rdev->mode_info.bios_hardcoded_edid_size) {
643 + radeon_connector->use_digital = true;
644 + ret = connector_status_connected;
645 + }
646 +
647 out:
648 /* updated in get modes as well since we need to know if it's analog or digital */
649 radeon_connector_update_scratch_regs(connector, ret);
650 diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
651 index a670caa..8c134db 100644
652 --- a/drivers/gpu/drm/radeon/radeon_mode.h
653 +++ b/drivers/gpu/drm/radeon/radeon_mode.h
654 @@ -239,6 +239,7 @@ struct radeon_mode_info {
655 struct drm_property *underscan_vborder_property;
656 /* hardcoded DFP edid from BIOS */
657 struct edid *bios_hardcoded_edid;
658 + int bios_hardcoded_edid_size;
659
660 /* pointer to fbdev info structure */
661 struct radeon_fbdev *rfbdev;
662 diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
663 index 7f85a86..53e6273 100644
664 --- a/drivers/input/xen-kbdfront.c
665 +++ b/drivers/input/xen-kbdfront.c
666 @@ -110,7 +110,7 @@ static irqreturn_t input_handler(int rq, void *dev_id)
667 static int __devinit xenkbd_probe(struct xenbus_device *dev,
668 const struct xenbus_device_id *id)
669 {
670 - int ret, i;
671 + int ret, i, abs;
672 struct xenkbd_info *info;
673 struct input_dev *kbd, *ptr;
674
675 @@ -128,6 +128,11 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
676 if (!info->page)
677 goto error_nomem;
678
679 + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0)
680 + abs = 0;
681 + if (abs)
682 + xenbus_printf(XBT_NIL, dev->nodename, "request-abs-pointer", "1");
683 +
684 /* keyboard */
685 kbd = input_allocate_device();
686 if (!kbd)
687 @@ -137,11 +142,12 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
688 kbd->id.bustype = BUS_PCI;
689 kbd->id.vendor = 0x5853;
690 kbd->id.product = 0xffff;
691 - kbd->evbit[0] = BIT(EV_KEY);
692 +
693 + __set_bit(EV_KEY, kbd->evbit);
694 for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
695 - set_bit(i, kbd->keybit);
696 + __set_bit(i, kbd->keybit);
697 for (i = KEY_OK; i < KEY_MAX; i++)
698 - set_bit(i, kbd->keybit);
699 + __set_bit(i, kbd->keybit);
700
701 ret = input_register_device(kbd);
702 if (ret) {
703 @@ -160,12 +166,20 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
704 ptr->id.bustype = BUS_PCI;
705 ptr->id.vendor = 0x5853;
706 ptr->id.product = 0xfffe;
707 - ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
708 +
709 + if (abs) {
710 + __set_bit(EV_ABS, ptr->evbit);
711 + input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
712 + input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
713 + } else {
714 + input_set_capability(ptr, EV_REL, REL_X);
715 + input_set_capability(ptr, EV_REL, REL_Y);
716 + }
717 + input_set_capability(ptr, EV_REL, REL_WHEEL);
718 +
719 + __set_bit(EV_KEY, ptr->evbit);
720 for (i = BTN_LEFT; i <= BTN_TASK; i++)
721 - set_bit(i, ptr->keybit);
722 - ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL);
723 - input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
724 - input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
725 + __set_bit(i, ptr->keybit);
726
727 ret = input_register_device(ptr);
728 if (ret) {
729 @@ -272,7 +286,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
730 enum xenbus_state backend_state)
731 {
732 struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
733 - int ret, val;
734 + int val;
735
736 switch (backend_state) {
737 case XenbusStateInitialising:
738 @@ -285,16 +299,6 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
739
740 case XenbusStateInitWait:
741 InitWait:
742 - ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
743 - "feature-abs-pointer", "%d", &val);
744 - if (ret < 0)
745 - val = 0;
746 - if (val) {
747 - ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
748 - "request-abs-pointer", "1");
749 - if (ret)
750 - pr_warning("can't request abs-pointer\n");
751 - }
752 xenbus_switch_state(dev, XenbusStateConnected);
753 break;
754
755 diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
756 index a1e9dfb..6459b8c 100644
757 --- a/drivers/media/video/uvc/uvc_driver.c
758 +++ b/drivers/media/video/uvc/uvc_driver.c
759 @@ -1264,6 +1264,14 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
760
761 break;
762
763 + case UVC_OTT_VENDOR_SPECIFIC:
764 + case UVC_OTT_DISPLAY:
765 + case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
766 + if (uvc_trace_param & UVC_TRACE_PROBE)
767 + printk(" OT %d", entity->id);
768 +
769 + break;
770 +
771 case UVC_TT_STREAMING:
772 if (UVC_ENTITY_IS_ITERM(entity)) {
773 if (uvc_trace_param & UVC_TRACE_PROBE)
774 diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
775 index 5673d67..545c029 100644
776 --- a/drivers/media/video/uvc/uvc_video.c
777 +++ b/drivers/media/video/uvc/uvc_video.c
778 @@ -89,15 +89,19 @@ int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
779 static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
780 struct uvc_streaming_control *ctrl)
781 {
782 - struct uvc_format *format;
783 + struct uvc_format *format = NULL;
784 struct uvc_frame *frame = NULL;
785 unsigned int i;
786
787 - if (ctrl->bFormatIndex <= 0 ||
788 - ctrl->bFormatIndex > stream->nformats)
789 - return;
790 + for (i = 0; i < stream->nformats; ++i) {
791 + if (stream->format[i].index == ctrl->bFormatIndex) {
792 + format = &stream->format[i];
793 + break;
794 + }
795 + }
796
797 - format = &stream->format[ctrl->bFormatIndex - 1];
798 + if (format == NULL)
799 + return;
800
801 for (i = 0; i < format->nframes; ++i) {
802 if (format->frame[i].bFrameIndex == ctrl->bFrameIndex) {
803 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
804 index cb23aa2..e610cfe 100644
805 --- a/drivers/pci/hotplug/acpiphp_glue.c
806 +++ b/drivers/pci/hotplug/acpiphp_glue.c
807 @@ -212,6 +212,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
808
809 pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
810 if (pdev) {
811 + pdev->current_state = PCI_D0;
812 slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
813 pci_dev_put(pdev);
814 }
815 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
816 index 4ab49d4..30bb8d0 100644
817 --- a/drivers/usb/class/cdc-acm.c
818 +++ b/drivers/usb/class/cdc-acm.c
819 @@ -297,6 +297,8 @@ static void acm_ctrl_irq(struct urb *urb)
820 if (!ACM_READY(acm))
821 goto exit;
822
823 + usb_mark_last_busy(acm->dev);
824 +
825 data = (unsigned char *)(dr + 1);
826 switch (dr->bNotificationType) {
827 case USB_CDC_NOTIFY_NETWORK_CONNECTION:
828 @@ -336,7 +338,6 @@ static void acm_ctrl_irq(struct urb *urb)
829 break;
830 }
831 exit:
832 - usb_mark_last_busy(acm->dev);
833 retval = usb_submit_urb(urb, GFP_ATOMIC);
834 if (retval)
835 dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with "
836 @@ -533,6 +534,8 @@ static void acm_softint(struct work_struct *work)
837 if (!ACM_READY(acm))
838 return;
839 tty = tty_port_tty_get(&acm->port);
840 + if (!tty)
841 + return;
842 tty_wakeup(tty);
843 tty_kref_put(tty);
844 }
845 @@ -646,8 +649,10 @@ static void acm_port_down(struct acm *acm)
846 usb_kill_urb(acm->ctrlurb);
847 for (i = 0; i < ACM_NW; i++)
848 usb_kill_urb(acm->wb[i].urb);
849 + tasklet_disable(&acm->urb_task);
850 for (i = 0; i < nr; i++)
851 usb_kill_urb(acm->ru[i].urb);
852 + tasklet_enable(&acm->urb_task);
853 acm->control->needs_remote_wakeup = 0;
854 usb_autopm_put_interface(acm->control);
855 }
856 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
857 index 47085e5..a97c018 100644
858 --- a/drivers/usb/class/cdc-wdm.c
859 +++ b/drivers/usb/class/cdc-wdm.c
860 @@ -281,7 +281,7 @@ static void cleanup(struct wdm_device *desc)
861 desc->sbuf,
862 desc->validity->transfer_dma);
863 usb_free_coherent(interface_to_usbdev(desc->intf),
864 - desc->wMaxCommand,
865 + desc->bMaxPacketSize0,
866 desc->inbuf,
867 desc->response->transfer_dma);
868 kfree(desc->orq);
869 diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
870 index a7131ad..37518df 100644
871 --- a/drivers/usb/core/devio.c
872 +++ b/drivers/usb/core/devio.c
873 @@ -802,7 +802,7 @@ static int proc_control(struct dev_state *ps, void __user *arg)
874 tbuf, ctrl.wLength, tmo);
875 usb_lock_device(dev);
876 snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE,
877 - tbuf, i);
878 + tbuf, max(i, 0));
879 if ((i > 0) && ctrl.wLength) {
880 if (copy_to_user(ctrl.data, tbuf, i)) {
881 free_page((unsigned long)tbuf);
882 diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
883 index 233c288..5add8b5 100644
884 --- a/drivers/usb/host/ehci-q.c
885 +++ b/drivers/usb/host/ehci-q.c
886 @@ -315,7 +315,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
887 int stopped;
888 unsigned count = 0;
889 u8 state;
890 - const __le32 halt = HALT_BIT(ehci);
891 struct ehci_qh_hw *hw = qh->hw;
892
893 if (unlikely (list_empty (&qh->qtd_list)))
894 @@ -422,7 +421,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
895 && !(qtd->hw_alt_next
896 & EHCI_LIST_END(ehci))) {
897 stopped = 1;
898 - goto halt;
899 }
900
901 /* stop scanning when we reach qtds the hc is using */
902 @@ -456,16 +454,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
903 */
904 ehci_clear_tt_buffer(ehci, qh, urb, token);
905 }
906 -
907 - /* force halt for unlinked or blocked qh, so we'll
908 - * patch the qh later and so that completions can't
909 - * activate it while we "know" it's stopped.
910 - */
911 - if ((halt & hw->hw_token) == 0) {
912 -halt:
913 - hw->hw_token |= halt;
914 - wmb ();
915 - }
916 }
917
918 /* unless we already know the urb's status, collect qtd status
919 diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
920 index f7a2057..8b1d94a 100644
921 --- a/drivers/usb/misc/uss720.c
922 +++ b/drivers/usb/misc/uss720.c
923 @@ -177,12 +177,11 @@ static struct uss720_async_request *submit_async_request(struct parport_uss720_p
924 spin_lock_irqsave(&priv->asynclock, flags);
925 list_add_tail(&rq->asynclist, &priv->asynclist);
926 spin_unlock_irqrestore(&priv->asynclock, flags);
927 + kref_get(&rq->ref_count);
928 ret = usb_submit_urb(rq->urb, mem_flags);
929 - if (!ret) {
930 - kref_get(&rq->ref_count);
931 + if (!ret)
932 return rq;
933 - }
934 - kref_put(&rq->ref_count, destroy_async);
935 + destroy_async(&rq->ref_count);
936 err("submit_async_request submit_urb failed with %d", ret);
937 return NULL;
938 }
939 diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
940 index 9d49d1c..52312e8 100644
941 --- a/drivers/usb/musb/blackfin.c
942 +++ b/drivers/usb/musb/blackfin.c
943 @@ -322,7 +322,7 @@ static void bfin_musb_try_idle(struct musb *musb, unsigned long timeout)
944 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
945 }
946
947 -static int bfin_musb_get_vbus_status(struct musb *musb)
948 +static int bfin_musb_vbus_status(struct musb *musb)
949 {
950 return 0;
951 }
952 @@ -540,7 +540,7 @@ static struct dev_pm_ops bfin_pm_ops = {
953 .resume = bfin_resume,
954 };
955
956 -#define DEV_PM_OPS &bfin_pm_op,
957 +#define DEV_PM_OPS &bfin_pm_ops
958 #else
959 #define DEV_PM_OPS NULL
960 #endif
961 @@ -548,7 +548,7 @@ static struct dev_pm_ops bfin_pm_ops = {
962 static struct platform_driver bfin_driver = {
963 .remove = __exit_p(bfin_remove),
964 .driver = {
965 - .name = "musb-bfin",
966 + .name = "musb-blackfin",
967 .pm = DEV_PM_OPS,
968 },
969 };
970 diff --git a/drivers/video/console/tileblit.c b/drivers/video/console/tileblit.c
971 index 0056a41..15e8e1a 100644
972 --- a/drivers/video/console/tileblit.c
973 +++ b/drivers/video/console/tileblit.c
974 @@ -83,7 +83,7 @@ static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode,
975 int softback_lines, int fg, int bg)
976 {
977 struct fb_tilecursor cursor;
978 - int use_sw = (vc->vc_cursor_type & 0x01);
979 + int use_sw = (vc->vc_cursor_type & 0x10);
980
981 cursor.sx = vc->vc_x;
982 cursor.sy = vc->vc_y;
983 diff --git a/fs/aio.c b/fs/aio.c
984 index 26869cd..88f0ed5 100644
985 --- a/fs/aio.c
986 +++ b/fs/aio.c
987 @@ -520,7 +520,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
988 ctx->reqs_active--;
989
990 if (unlikely(!ctx->reqs_active && ctx->dead))
991 - wake_up(&ctx->wait);
992 + wake_up_all(&ctx->wait);
993 }
994
995 static void aio_fput_routine(struct work_struct *data)
996 @@ -1229,7 +1229,7 @@ static void io_destroy(struct kioctx *ioctx)
997 * by other CPUs at this point. Right now, we rely on the
998 * locking done by the above calls to ensure this consistency.
999 */
1000 - wake_up(&ioctx->wait);
1001 + wake_up_all(&ioctx->wait);
1002 put_ioctx(ioctx); /* once for the lookup */
1003 }
1004
1005 diff --git a/fs/dcache.c b/fs/dcache.c
1006 index a39fe47..1baddc1 100644
1007 --- a/fs/dcache.c
1008 +++ b/fs/dcache.c
1009 @@ -1612,10 +1612,13 @@ struct dentry *d_obtain_alias(struct inode *inode)
1010 __bit_spin_unlock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
1011 spin_unlock(&tmp->d_lock);
1012 spin_unlock(&inode->i_lock);
1013 + security_d_instantiate(tmp, inode);
1014
1015 return tmp;
1016
1017 out_iput:
1018 + if (res && !IS_ERR(res))
1019 + security_d_instantiate(res, inode);
1020 iput(inode);
1021 return res;
1022 }
1023 diff --git a/fs/ext3/super.c b/fs/ext3/super.c
1024 index 85c8cc8..0d62f29 100644
1025 --- a/fs/ext3/super.c
1026 +++ b/fs/ext3/super.c
1027 @@ -1464,6 +1464,13 @@ static void ext3_orphan_cleanup (struct super_block * sb,
1028 return;
1029 }
1030
1031 + /* Check if feature set allows readwrite operations */
1032 + if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) {
1033 + ext3_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
1034 + "unknown ROCOMPAT features");
1035 + return;
1036 + }
1037 +
1038 if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
1039 if (es->s_last_orphan)
1040 jbd_debug(1, "Errors on filesystem, "
1041 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1042 index f6a318f..4381efe 100644
1043 --- a/fs/ext4/super.c
1044 +++ b/fs/ext4/super.c
1045 @@ -75,6 +75,7 @@ static void ext4_write_super(struct super_block *sb);
1046 static int ext4_freeze(struct super_block *sb);
1047 static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
1048 const char *dev_name, void *data);
1049 +static int ext4_feature_set_ok(struct super_block *sb, int readonly);
1050 static void ext4_destroy_lazyinit_thread(void);
1051 static void ext4_unregister_li_request(struct super_block *sb);
1052 static void ext4_clear_request_list(void);
1053 @@ -2120,6 +2121,13 @@ static void ext4_orphan_cleanup(struct super_block *sb,
1054 return;
1055 }
1056
1057 + /* Check if feature set would not allow a r/w mount */
1058 + if (!ext4_feature_set_ok(sb, 0)) {
1059 + ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
1060 + "unknown ROCOMPAT features");
1061 + return;
1062 + }
1063 +
1064 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
1065 if (es->s_last_orphan)
1066 jbd_debug(1, "Errors on filesystem, "
1067 diff --git a/fs/namespace.c b/fs/namespace.c
1068 index d1edf26..445534b 100644
1069 --- a/fs/namespace.c
1070 +++ b/fs/namespace.c
1071 @@ -2469,9 +2469,6 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
1072 error = user_path_dir(new_root, &new);
1073 if (error)
1074 goto out0;
1075 - error = -EINVAL;
1076 - if (!check_mnt(new.mnt))
1077 - goto out1;
1078
1079 error = user_path_dir(put_old, &old);
1080 if (error)
1081 @@ -2491,7 +2488,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
1082 IS_MNT_SHARED(new.mnt->mnt_parent) ||
1083 IS_MNT_SHARED(root.mnt->mnt_parent))
1084 goto out2;
1085 - if (!check_mnt(root.mnt))
1086 + if (!check_mnt(root.mnt) || !check_mnt(new.mnt))
1087 goto out2;
1088 error = -ENOENT;
1089 if (cant_mount(old.dentry))
1090 @@ -2515,19 +2512,19 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
1091 goto out2; /* not attached */
1092 /* make sure we can reach put_old from new_root */
1093 tmp = old.mnt;
1094 - br_write_lock(vfsmount_lock);
1095 if (tmp != new.mnt) {
1096 for (;;) {
1097 if (tmp->mnt_parent == tmp)
1098 - goto out3; /* already mounted on put_old */
1099 + goto out2; /* already mounted on put_old */
1100 if (tmp->mnt_parent == new.mnt)
1101 break;
1102 tmp = tmp->mnt_parent;
1103 }
1104 if (!is_subdir(tmp->mnt_mountpoint, new.dentry))
1105 - goto out3;
1106 + goto out2;
1107 } else if (!is_subdir(old.dentry, new.dentry))
1108 - goto out3;
1109 + goto out2;
1110 + br_write_lock(vfsmount_lock);
1111 detach_mnt(new.mnt, &parent_path);
1112 detach_mnt(root.mnt, &root_parent);
1113 /* mount old root on put_old */
1114 @@ -2550,9 +2547,6 @@ out1:
1115 path_put(&new);
1116 out0:
1117 return error;
1118 -out3:
1119 - br_write_unlock(vfsmount_lock);
1120 - goto out2;
1121 }
1122
1123 static void __init init_mount_tree(void)
1124 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
1125 index 42b92d7..b5fcbf7 100644
1126 --- a/fs/nfs/write.c
1127 +++ b/fs/nfs/write.c
1128 @@ -1214,13 +1214,17 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1129 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1130 static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
1131 {
1132 + int ret;
1133 +
1134 if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
1135 return 1;
1136 - if (may_wait && !out_of_line_wait_on_bit_lock(&nfsi->flags,
1137 - NFS_INO_COMMIT, nfs_wait_bit_killable,
1138 - TASK_KILLABLE))
1139 - return 1;
1140 - return 0;
1141 + if (!may_wait)
1142 + return 0;
1143 + ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
1144 + NFS_INO_COMMIT,
1145 + nfs_wait_bit_killable,
1146 + TASK_KILLABLE);
1147 + return (ret < 0) ? ret : 1;
1148 }
1149
1150 static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
1151 @@ -1396,9 +1400,10 @@ int nfs_commit_inode(struct inode *inode, int how)
1152 {
1153 LIST_HEAD(head);
1154 int may_wait = how & FLUSH_SYNC;
1155 - int res = 0;
1156 + int res;
1157
1158 - if (!nfs_commit_set_lock(NFS_I(inode), may_wait))
1159 + res = nfs_commit_set_lock(NFS_I(inode), may_wait);
1160 + if (res <= 0)
1161 goto out_mark_dirty;
1162 spin_lock(&inode->i_lock);
1163 res = nfs_scan_commit(inode, &head, 0, 0);
1164 @@ -1407,12 +1412,14 @@ int nfs_commit_inode(struct inode *inode, int how)
1165 int error = nfs_commit_list(inode, &head, how);
1166 if (error < 0)
1167 return error;
1168 - if (may_wait)
1169 - wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT,
1170 - nfs_wait_bit_killable,
1171 - TASK_KILLABLE);
1172 - else
1173 + if (!may_wait)
1174 goto out_mark_dirty;
1175 + error = wait_on_bit(&NFS_I(inode)->flags,
1176 + NFS_INO_COMMIT,
1177 + nfs_wait_bit_killable,
1178 + TASK_KILLABLE);
1179 + if (error < 0)
1180 + return error;
1181 } else
1182 nfs_commit_clear_lock(NFS_I(inode));
1183 return res;
1184 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
1185 index db52546..5fcb139 100644
1186 --- a/fs/nfsd/nfs4proc.c
1187 +++ b/fs/nfsd/nfs4proc.c
1188 @@ -984,8 +984,8 @@ typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *,
1189 void *);
1190 enum nfsd4_op_flags {
1191 ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */
1192 - ALLOWED_ON_ABSENT_FS = 2 << 0, /* ops processed on absent fs */
1193 - ALLOWED_AS_FIRST_OP = 3 << 0, /* ops reqired first in compound */
1194 + ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */
1195 + ALLOWED_AS_FIRST_OP = 1 << 2, /* ops reqired first in compound */
1196 };
1197
1198 struct nfsd4_operation {
1199 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
1200 index 7b566ec..f0e448a 100644
1201 --- a/fs/nfsd/nfs4state.c
1202 +++ b/fs/nfsd/nfs4state.c
1203 @@ -316,64 +316,6 @@ static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE];
1204 static struct list_head client_lru;
1205 static struct list_head close_lru;
1206
1207 -static void unhash_generic_stateid(struct nfs4_stateid *stp)
1208 -{
1209 - list_del(&stp->st_hash);
1210 - list_del(&stp->st_perfile);
1211 - list_del(&stp->st_perstateowner);
1212 -}
1213 -
1214 -static void free_generic_stateid(struct nfs4_stateid *stp)
1215 -{
1216 - put_nfs4_file(stp->st_file);
1217 - kmem_cache_free(stateid_slab, stp);
1218 -}
1219 -
1220 -static void release_lock_stateid(struct nfs4_stateid *stp)
1221 -{
1222 - struct file *file;
1223 -
1224 - unhash_generic_stateid(stp);
1225 - file = find_any_file(stp->st_file);
1226 - if (file)
1227 - locks_remove_posix(file, (fl_owner_t)stp->st_stateowner);
1228 - free_generic_stateid(stp);
1229 -}
1230 -
1231 -static void unhash_lockowner(struct nfs4_stateowner *sop)
1232 -{
1233 - struct nfs4_stateid *stp;
1234 -
1235 - list_del(&sop->so_idhash);
1236 - list_del(&sop->so_strhash);
1237 - list_del(&sop->so_perstateid);
1238 - while (!list_empty(&sop->so_stateids)) {
1239 - stp = list_first_entry(&sop->so_stateids,
1240 - struct nfs4_stateid, st_perstateowner);
1241 - release_lock_stateid(stp);
1242 - }
1243 -}
1244 -
1245 -static void release_lockowner(struct nfs4_stateowner *sop)
1246 -{
1247 - unhash_lockowner(sop);
1248 - nfs4_put_stateowner(sop);
1249 -}
1250 -
1251 -static void
1252 -release_stateid_lockowners(struct nfs4_stateid *open_stp)
1253 -{
1254 - struct nfs4_stateowner *lock_sop;
1255 -
1256 - while (!list_empty(&open_stp->st_lockowners)) {
1257 - lock_sop = list_entry(open_stp->st_lockowners.next,
1258 - struct nfs4_stateowner, so_perstateid);
1259 - /* list_del(&open_stp->st_lockowners); */
1260 - BUG_ON(lock_sop->so_is_open_owner);
1261 - release_lockowner(lock_sop);
1262 - }
1263 -}
1264 -
1265 /*
1266 * We store the NONE, READ, WRITE, and BOTH bits separately in the
1267 * st_{access,deny}_bmap field of the stateid, in order to track not
1268 @@ -446,13 +388,71 @@ static int nfs4_access_bmap_to_omode(struct nfs4_stateid *stp)
1269 return nfs4_access_to_omode(access);
1270 }
1271
1272 -static void release_open_stateid(struct nfs4_stateid *stp)
1273 +static void unhash_generic_stateid(struct nfs4_stateid *stp)
1274 +{
1275 + list_del(&stp->st_hash);
1276 + list_del(&stp->st_perfile);
1277 + list_del(&stp->st_perstateowner);
1278 +}
1279 +
1280 +static void free_generic_stateid(struct nfs4_stateid *stp)
1281 {
1282 int oflag = nfs4_access_bmap_to_omode(stp);
1283
1284 + nfs4_file_put_access(stp->st_file, oflag);
1285 + put_nfs4_file(stp->st_file);
1286 + kmem_cache_free(stateid_slab, stp);
1287 +}
1288 +
1289 +static void release_lock_stateid(struct nfs4_stateid *stp)
1290 +{
1291 + struct file *file;
1292 +
1293 + unhash_generic_stateid(stp);
1294 + file = find_any_file(stp->st_file);
1295 + if (file)
1296 + locks_remove_posix(file, (fl_owner_t)stp->st_stateowner);
1297 + free_generic_stateid(stp);
1298 +}
1299 +
1300 +static void unhash_lockowner(struct nfs4_stateowner *sop)
1301 +{
1302 + struct nfs4_stateid *stp;
1303 +
1304 + list_del(&sop->so_idhash);
1305 + list_del(&sop->so_strhash);
1306 + list_del(&sop->so_perstateid);
1307 + while (!list_empty(&sop->so_stateids)) {
1308 + stp = list_first_entry(&sop->so_stateids,
1309 + struct nfs4_stateid, st_perstateowner);
1310 + release_lock_stateid(stp);
1311 + }
1312 +}
1313 +
1314 +static void release_lockowner(struct nfs4_stateowner *sop)
1315 +{
1316 + unhash_lockowner(sop);
1317 + nfs4_put_stateowner(sop);
1318 +}
1319 +
1320 +static void
1321 +release_stateid_lockowners(struct nfs4_stateid *open_stp)
1322 +{
1323 + struct nfs4_stateowner *lock_sop;
1324 +
1325 + while (!list_empty(&open_stp->st_lockowners)) {
1326 + lock_sop = list_entry(open_stp->st_lockowners.next,
1327 + struct nfs4_stateowner, so_perstateid);
1328 + /* list_del(&open_stp->st_lockowners); */
1329 + BUG_ON(lock_sop->so_is_open_owner);
1330 + release_lockowner(lock_sop);
1331 + }
1332 +}
1333 +
1334 +static void release_open_stateid(struct nfs4_stateid *stp)
1335 +{
1336 unhash_generic_stateid(stp);
1337 release_stateid_lockowners(stp);
1338 - nfs4_file_put_access(stp->st_file, oflag);
1339 free_generic_stateid(stp);
1340 }
1341
1342 @@ -3735,6 +3735,7 @@ alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struc
1343 stp->st_stateid.si_stateownerid = sop->so_id;
1344 stp->st_stateid.si_fileid = fp->fi_id;
1345 stp->st_stateid.si_generation = 0;
1346 + stp->st_access_bmap = 0;
1347 stp->st_deny_bmap = open_stp->st_deny_bmap;
1348 stp->st_openstp = open_stp;
1349
1350 @@ -3749,6 +3750,17 @@ check_lock_length(u64 offset, u64 length)
1351 LOFF_OVERFLOW(offset, length)));
1352 }
1353
1354 +static void get_lock_access(struct nfs4_stateid *lock_stp, u32 access)
1355 +{
1356 + struct nfs4_file *fp = lock_stp->st_file;
1357 + int oflag = nfs4_access_to_omode(access);
1358 +
1359 + if (test_bit(access, &lock_stp->st_access_bmap))
1360 + return;
1361 + nfs4_file_get_access(fp, oflag);
1362 + __set_bit(access, &lock_stp->st_access_bmap);
1363 +}
1364 +
1365 /*
1366 * LOCK operation
1367 */
1368 @@ -3765,7 +3777,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1369 struct file_lock conflock;
1370 __be32 status = 0;
1371 unsigned int strhashval;
1372 - unsigned int cmd;
1373 int err;
1374
1375 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
1376 @@ -3847,22 +3858,18 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1377 switch (lock->lk_type) {
1378 case NFS4_READ_LT:
1379 case NFS4_READW_LT:
1380 - if (find_readable_file(lock_stp->st_file)) {
1381 - nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_READ);
1382 - filp = find_readable_file(lock_stp->st_file);
1383 - }
1384 + filp = find_readable_file(lock_stp->st_file);
1385 + if (filp)
1386 + get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
1387 file_lock.fl_type = F_RDLCK;
1388 - cmd = F_SETLK;
1389 - break;
1390 + break;
1391 case NFS4_WRITE_LT:
1392 case NFS4_WRITEW_LT:
1393 - if (find_writeable_file(lock_stp->st_file)) {
1394 - nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_WRITE);
1395 - filp = find_writeable_file(lock_stp->st_file);
1396 - }
1397 + filp = find_writeable_file(lock_stp->st_file);
1398 + if (filp)
1399 + get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
1400 file_lock.fl_type = F_WRLCK;
1401 - cmd = F_SETLK;
1402 - break;
1403 + break;
1404 default:
1405 status = nfserr_inval;
1406 goto out;
1407 @@ -3886,7 +3893,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1408 * Note: locks.c uses the BKL to protect the inode's lock list.
1409 */
1410
1411 - err = vfs_lock_file(filp, cmd, &file_lock, &conflock);
1412 + err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock);
1413 switch (-err) {
1414 case 0: /* success! */
1415 update_stateid(&lock_stp->st_stateid);
1416 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
1417 index 615f0a9..c6766af 100644
1418 --- a/fs/nfsd/nfs4xdr.c
1419 +++ b/fs/nfsd/nfs4xdr.c
1420 @@ -1142,7 +1142,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
1421
1422 u32 dummy;
1423 char *machine_name;
1424 - int i, j;
1425 + int i;
1426 int nr_secflavs;
1427
1428 READ_BUF(16);
1429 @@ -1215,8 +1215,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
1430 READ_BUF(4);
1431 READ32(dummy);
1432 READ_BUF(dummy * 4);
1433 - for (j = 0; j < dummy; ++j)
1434 - READ32(dummy);
1435 break;
1436 case RPC_AUTH_GSS:
1437 dprintk("RPC_AUTH_GSS callback secflavor "
1438 @@ -1232,7 +1230,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
1439 READ_BUF(4);
1440 READ32(dummy);
1441 READ_BUF(dummy);
1442 - p += XDR_QUADLEN(dummy);
1443 break;
1444 default:
1445 dprintk("Illegal callback secflavor\n");
1446 diff --git a/fs/proc/array.c b/fs/proc/array.c
1447 index 7c99c1c..5e4f776 100644
1448 --- a/fs/proc/array.c
1449 +++ b/fs/proc/array.c
1450 @@ -489,8 +489,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
1451 vsize,
1452 mm ? get_mm_rss(mm) : 0,
1453 rsslim,
1454 - mm ? mm->start_code : 0,
1455 - mm ? mm->end_code : 0,
1456 + mm ? (permitted ? mm->start_code : 1) : 0,
1457 + mm ? (permitted ? mm->end_code : 1) : 0,
1458 (permitted && mm) ? mm->start_stack : 0,
1459 esp,
1460 eip,
1461 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
1462 index 60b9148..f269ee6 100644
1463 --- a/fs/proc/task_mmu.c
1464 +++ b/fs/proc/task_mmu.c
1465 @@ -249,8 +249,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
1466 const char *name = arch_vma_name(vma);
1467 if (!name) {
1468 if (mm) {
1469 - if (vma->vm_start <= mm->start_brk &&
1470 - vma->vm_end >= mm->brk) {
1471 + if (vma->vm_start <= mm->brk &&
1472 + vma->vm_end >= mm->start_brk) {
1473 name = "[heap]";
1474 } else if (vma->vm_start <= mm->start_stack &&
1475 vma->vm_end >= mm->start_stack) {
1476 diff --git a/fs/super.c b/fs/super.c
1477 index 7e9dd4c..0d89e93 100644
1478 --- a/fs/super.c
1479 +++ b/fs/super.c
1480 @@ -71,6 +71,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
1481 #else
1482 INIT_LIST_HEAD(&s->s_files);
1483 #endif
1484 + s->s_bdi = &default_backing_dev_info;
1485 INIT_LIST_HEAD(&s->s_instances);
1486 INIT_HLIST_BL_HEAD(&s->s_anon);
1487 INIT_LIST_HEAD(&s->s_inodes);
1488 @@ -1003,6 +1004,7 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
1489 }
1490 BUG_ON(!mnt->mnt_sb);
1491 WARN_ON(!mnt->mnt_sb->s_bdi);
1492 + WARN_ON(mnt->mnt_sb->s_bdi == &default_backing_dev_info);
1493 mnt->mnt_sb->s_flags |= MS_BORN;
1494
1495 error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata);
1496 diff --git a/fs/sync.c b/fs/sync.c
1497 index ba76b96..412dc89 100644
1498 --- a/fs/sync.c
1499 +++ b/fs/sync.c
1500 @@ -33,7 +33,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
1501 * This should be safe, as we require bdi backing to actually
1502 * write out data in the first place
1503 */
1504 - if (!sb->s_bdi || sb->s_bdi == &noop_backing_dev_info)
1505 + if (sb->s_bdi == &noop_backing_dev_info)
1506 return 0;
1507
1508 if (sb->s_qcop && sb->s_qcop->quota_sync)
1509 @@ -79,7 +79,7 @@ EXPORT_SYMBOL_GPL(sync_filesystem);
1510
1511 static void sync_one_sb(struct super_block *sb, void *arg)
1512 {
1513 - if (!(sb->s_flags & MS_RDONLY) && sb->s_bdi)
1514 + if (!(sb->s_flags & MS_RDONLY))
1515 __sync_filesystem(sb, *(int *)arg);
1516 }
1517 /*
1518 diff --git a/include/linux/compaction.h b/include/linux/compaction.h
1519 index dfa2ed4..cc9f7a4 100644
1520 --- a/include/linux/compaction.h
1521 +++ b/include/linux/compaction.h
1522 @@ -11,9 +11,6 @@
1523 /* The full zone was compacted */
1524 #define COMPACT_COMPLETE 3
1525
1526 -#define COMPACT_MODE_DIRECT_RECLAIM 0
1527 -#define COMPACT_MODE_KSWAPD 1
1528 -
1529 #ifdef CONFIG_COMPACTION
1530 extern int sysctl_compact_memory;
1531 extern int sysctl_compaction_handler(struct ctl_table *table, int write,
1532 @@ -28,8 +25,7 @@ extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
1533 bool sync);
1534 extern unsigned long compaction_suitable(struct zone *zone, int order);
1535 extern unsigned long compact_zone_order(struct zone *zone, int order,
1536 - gfp_t gfp_mask, bool sync,
1537 - int compact_mode);
1538 + gfp_t gfp_mask, bool sync);
1539
1540 /* Do not skip compaction more than 64 times */
1541 #define COMPACT_MAX_DEFER_SHIFT 6
1542 @@ -74,8 +70,7 @@ static inline unsigned long compaction_suitable(struct zone *zone, int order)
1543 }
1544
1545 static inline unsigned long compact_zone_order(struct zone *zone, int order,
1546 - gfp_t gfp_mask, bool sync,
1547 - int compact_mode)
1548 + gfp_t gfp_mask, bool sync)
1549 {
1550 return COMPACT_CONTINUE;
1551 }
1552 diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
1553 index 1908929..a3c1874 100644
1554 --- a/include/linux/ethtool.h
1555 +++ b/include/linux/ethtool.h
1556 @@ -13,6 +13,9 @@
1557 #ifndef _LINUX_ETHTOOL_H
1558 #define _LINUX_ETHTOOL_H
1559
1560 +#ifdef __KERNEL__
1561 +#include <linux/compat.h>
1562 +#endif
1563 #include <linux/types.h>
1564 #include <linux/if_ether.h>
1565
1566 @@ -449,6 +452,37 @@ struct ethtool_rxnfc {
1567 __u32 rule_locs[0];
1568 };
1569
1570 +#ifdef __KERNEL__
1571 +#ifdef CONFIG_COMPAT
1572 +
1573 +struct compat_ethtool_rx_flow_spec {
1574 + u32 flow_type;
1575 + union {
1576 + struct ethtool_tcpip4_spec tcp_ip4_spec;
1577 + struct ethtool_tcpip4_spec udp_ip4_spec;
1578 + struct ethtool_tcpip4_spec sctp_ip4_spec;
1579 + struct ethtool_ah_espip4_spec ah_ip4_spec;
1580 + struct ethtool_ah_espip4_spec esp_ip4_spec;
1581 + struct ethtool_usrip4_spec usr_ip4_spec;
1582 + struct ethhdr ether_spec;
1583 + u8 hdata[72];
1584 + } h_u, m_u;
1585 + compat_u64 ring_cookie;
1586 + u32 location;
1587 +};
1588 +
1589 +struct compat_ethtool_rxnfc {
1590 + u32 cmd;
1591 + u32 flow_type;
1592 + compat_u64 data;
1593 + struct compat_ethtool_rx_flow_spec fs;
1594 + u32 rule_cnt;
1595 + u32 rule_locs[0];
1596 +};
1597 +
1598 +#endif /* CONFIG_COMPAT */
1599 +#endif /* __KERNEL__ */
1600 +
1601 /**
1602 * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection
1603 * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR
1604 diff --git a/include/linux/mm.h b/include/linux/mm.h
1605 index f6385fc..c67adb4 100644
1606 --- a/include/linux/mm.h
1607 +++ b/include/linux/mm.h
1608 @@ -402,16 +402,23 @@ static inline void init_page_count(struct page *page)
1609 /*
1610 * PageBuddy() indicate that the page is free and in the buddy system
1611 * (see mm/page_alloc.c).
1612 + *
1613 + * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
1614 + * -2 so that an underflow of the page_mapcount() won't be mistaken
1615 + * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
1616 + * efficiently by most CPU architectures.
1617 */
1618 +#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
1619 +
1620 static inline int PageBuddy(struct page *page)
1621 {
1622 - return atomic_read(&page->_mapcount) == -2;
1623 + return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
1624 }
1625
1626 static inline void __SetPageBuddy(struct page *page)
1627 {
1628 VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
1629 - atomic_set(&page->_mapcount, -2);
1630 + atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
1631 }
1632
1633 static inline void __ClearPageBuddy(struct page *page)
1634 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
1635 index b24d702..bcc7336 100644
1636 --- a/kernel/cgroup.c
1637 +++ b/kernel/cgroup.c
1638 @@ -1813,10 +1813,8 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1639
1640 /* Update the css_set linked lists if we're using them */
1641 write_lock(&css_set_lock);
1642 - if (!list_empty(&tsk->cg_list)) {
1643 - list_del(&tsk->cg_list);
1644 - list_add(&tsk->cg_list, &newcg->tasks);
1645 - }
1646 + if (!list_empty(&tsk->cg_list))
1647 + list_move(&tsk->cg_list, &newcg->tasks);
1648 write_unlock(&css_set_lock);
1649
1650 for_each_subsys(root, ss) {
1651 @@ -3655,12 +3653,12 @@ again:
1652 spin_lock(&release_list_lock);
1653 set_bit(CGRP_REMOVED, &cgrp->flags);
1654 if (!list_empty(&cgrp->release_list))
1655 - list_del(&cgrp->release_list);
1656 + list_del_init(&cgrp->release_list);
1657 spin_unlock(&release_list_lock);
1658
1659 cgroup_lock_hierarchy(cgrp->root);
1660 /* delete this cgroup from parent->children */
1661 - list_del(&cgrp->sibling);
1662 + list_del_init(&cgrp->sibling);
1663 cgroup_unlock_hierarchy(cgrp->root);
1664
1665 d = dget(cgrp->dentry);
1666 @@ -3879,7 +3877,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
1667 subsys[ss->subsys_id] = NULL;
1668
1669 /* remove subsystem from rootnode's list of subsystems */
1670 - list_del(&ss->sibling);
1671 + list_del_init(&ss->sibling);
1672
1673 /*
1674 * disentangle the css from all css_sets attached to the dummytop. as
1675 @@ -4253,7 +4251,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
1676 if (!list_empty(&tsk->cg_list)) {
1677 write_lock(&css_set_lock);
1678 if (!list_empty(&tsk->cg_list))
1679 - list_del(&tsk->cg_list);
1680 + list_del_init(&tsk->cg_list);
1681 write_unlock(&css_set_lock);
1682 }
1683
1684 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
1685 index b22a2ef..ad02fea 100644
1686 --- a/kernel/perf_event.c
1687 +++ b/kernel/perf_event.c
1688 @@ -6115,17 +6115,20 @@ __perf_event_exit_task(struct perf_event *child_event,
1689 struct perf_event_context *child_ctx,
1690 struct task_struct *child)
1691 {
1692 - struct perf_event *parent_event;
1693 + if (child_event->parent) {
1694 + raw_spin_lock_irq(&child_ctx->lock);
1695 + perf_group_detach(child_event);
1696 + raw_spin_unlock_irq(&child_ctx->lock);
1697 + }
1698
1699 perf_event_remove_from_context(child_event);
1700
1701 - parent_event = child_event->parent;
1702 /*
1703 - * It can happen that parent exits first, and has events
1704 + * It can happen that the parent exits first, and has events
1705 * that are still around due to the child reference. These
1706 - * events need to be zapped - but otherwise linger.
1707 + * events need to be zapped.
1708 */
1709 - if (parent_event) {
1710 + if (child_event->parent) {
1711 sync_child_event(child_event, child);
1712 free_event(child_event);
1713 }
1714 diff --git a/kernel/signal.c b/kernel/signal.c
1715 index 4e3cff1..3175186 100644
1716 --- a/kernel/signal.c
1717 +++ b/kernel/signal.c
1718 @@ -2421,9 +2421,13 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
1719 return -EFAULT;
1720
1721 /* Not even root can pretend to send signals from the kernel.
1722 - Nor can they impersonate a kill(), which adds source info. */
1723 - if (info.si_code >= 0)
1724 + * Nor can they impersonate a kill()/tgkill(), which adds source info.
1725 + */
1726 + if (info.si_code != SI_QUEUE) {
1727 + /* We used to allow any < 0 si_code */
1728 + WARN_ON_ONCE(info.si_code < 0);
1729 return -EPERM;
1730 + }
1731 info.si_signo = sig;
1732
1733 /* POSIX.1b doesn't mention process groups. */
1734 @@ -2437,9 +2441,13 @@ long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
1735 return -EINVAL;
1736
1737 /* Not even root can pretend to send signals from the kernel.
1738 - Nor can they impersonate a kill(), which adds source info. */
1739 - if (info->si_code >= 0)
1740 + * Nor can they impersonate a kill()/tgkill(), which adds source info.
1741 + */
1742 + if (info->si_code != SI_QUEUE) {
1743 + /* We used to allow any < 0 si_code */
1744 + WARN_ON_ONCE(info->si_code < 0);
1745 return -EPERM;
1746 + }
1747 info->si_signo = sig;
1748
1749 return do_send_specific(tgid, pid, sig, info);
1750 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
1751 index 4eed0af..443fd20 100644
1752 --- a/kernel/sysctl.c
1753 +++ b/kernel/sysctl.c
1754 @@ -169,6 +169,11 @@ static int proc_taint(struct ctl_table *table, int write,
1755 void __user *buffer, size_t *lenp, loff_t *ppos);
1756 #endif
1757
1758 +#ifdef CONFIG_PRINTK
1759 +static int proc_dmesg_restrict(struct ctl_table *table, int write,
1760 + void __user *buffer, size_t *lenp, loff_t *ppos);
1761 +#endif
1762 +
1763 #ifdef CONFIG_MAGIC_SYSRQ
1764 /* Note: sysrq code uses it's own private copy */
1765 static int __sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
1766 @@ -713,7 +718,7 @@ static struct ctl_table kern_table[] = {
1767 .data = &kptr_restrict,
1768 .maxlen = sizeof(int),
1769 .mode = 0644,
1770 - .proc_handler = proc_dointvec_minmax,
1771 + .proc_handler = proc_dmesg_restrict,
1772 .extra1 = &zero,
1773 .extra2 = &two,
1774 },
1775 @@ -2397,6 +2402,17 @@ static int proc_taint(struct ctl_table *table, int write,
1776 return err;
1777 }
1778
1779 +#ifdef CONFIG_PRINTK
1780 +static int proc_dmesg_restrict(struct ctl_table *table, int write,
1781 + void __user *buffer, size_t *lenp, loff_t *ppos)
1782 +{
1783 + if (write && !capable(CAP_SYS_ADMIN))
1784 + return -EPERM;
1785 +
1786 + return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1787 +}
1788 +#endif
1789 +
1790 struct do_proc_dointvec_minmax_conv_param {
1791 int *min;
1792 int *max;
1793 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
1794 index 027100d..8e4ed88 100644
1795 --- a/mm/backing-dev.c
1796 +++ b/mm/backing-dev.c
1797 @@ -604,7 +604,7 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
1798 spin_lock(&sb_lock);
1799 list_for_each_entry(sb, &super_blocks, s_list) {
1800 if (sb->s_bdi == bdi)
1801 - sb->s_bdi = NULL;
1802 + sb->s_bdi = &default_backing_dev_info;
1803 }
1804 spin_unlock(&sb_lock);
1805 }
1806 diff --git a/mm/compaction.c b/mm/compaction.c
1807 index 8be430b..dcb058b 100644
1808 --- a/mm/compaction.c
1809 +++ b/mm/compaction.c
1810 @@ -42,8 +42,6 @@ struct compact_control {
1811 unsigned int order; /* order a direct compactor needs */
1812 int migratetype; /* MOVABLE, RECLAIMABLE etc */
1813 struct zone *zone;
1814 -
1815 - int compact_mode;
1816 };
1817
1818 static unsigned long release_freepages(struct list_head *freelist)
1819 @@ -397,10 +395,7 @@ static int compact_finished(struct zone *zone,
1820 return COMPACT_COMPLETE;
1821
1822 /* Compaction run is not finished if the watermark is not met */
1823 - if (cc->compact_mode != COMPACT_MODE_KSWAPD)
1824 - watermark = low_wmark_pages(zone);
1825 - else
1826 - watermark = high_wmark_pages(zone);
1827 + watermark = low_wmark_pages(zone);
1828 watermark += (1 << cc->order);
1829
1830 if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
1831 @@ -413,15 +408,6 @@ static int compact_finished(struct zone *zone,
1832 if (cc->order == -1)
1833 return COMPACT_CONTINUE;
1834
1835 - /*
1836 - * Generating only one page of the right order is not enough
1837 - * for kswapd, we must continue until we're above the high
1838 - * watermark as a pool for high order GFP_ATOMIC allocations
1839 - * too.
1840 - */
1841 - if (cc->compact_mode == COMPACT_MODE_KSWAPD)
1842 - return COMPACT_CONTINUE;
1843 -
1844 /* Direct compactor: Is a suitable page free? */
1845 for (order = cc->order; order < MAX_ORDER; order++) {
1846 /* Job done if page is free of the right migratetype */
1847 @@ -543,8 +529,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
1848
1849 unsigned long compact_zone_order(struct zone *zone,
1850 int order, gfp_t gfp_mask,
1851 - bool sync,
1852 - int compact_mode)
1853 + bool sync)
1854 {
1855 struct compact_control cc = {
1856 .nr_freepages = 0,
1857 @@ -553,7 +538,6 @@ unsigned long compact_zone_order(struct zone *zone,
1858 .migratetype = allocflags_to_migratetype(gfp_mask),
1859 .zone = zone,
1860 .sync = sync,
1861 - .compact_mode = compact_mode,
1862 };
1863 INIT_LIST_HEAD(&cc.freepages);
1864 INIT_LIST_HEAD(&cc.migratepages);
1865 @@ -599,8 +583,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
1866 nodemask) {
1867 int status;
1868
1869 - status = compact_zone_order(zone, order, gfp_mask, sync,
1870 - COMPACT_MODE_DIRECT_RECLAIM);
1871 + status = compact_zone_order(zone, order, gfp_mask, sync);
1872 rc = max(status, rc);
1873
1874 /* If a normal allocation would succeed, stop compacting */
1875 @@ -631,7 +614,6 @@ static int compact_node(int nid)
1876 .nr_freepages = 0,
1877 .nr_migratepages = 0,
1878 .order = -1,
1879 - .compact_mode = COMPACT_MODE_DIRECT_RECLAIM,
1880 };
1881
1882 zone = &pgdat->node_zones[zoneid];
1883 diff --git a/mm/oom_kill.c b/mm/oom_kill.c
1884 index 7dcca55..33b5861 100644
1885 --- a/mm/oom_kill.c
1886 +++ b/mm/oom_kill.c
1887 @@ -31,6 +31,7 @@
1888 #include <linux/memcontrol.h>
1889 #include <linux/mempolicy.h>
1890 #include <linux/security.h>
1891 +#include <linux/ptrace.h>
1892
1893 int sysctl_panic_on_oom;
1894 int sysctl_oom_kill_allocating_task;
1895 @@ -292,13 +293,15 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
1896 unsigned long totalpages, struct mem_cgroup *mem,
1897 const nodemask_t *nodemask)
1898 {
1899 - struct task_struct *p;
1900 + struct task_struct *g, *p;
1901 struct task_struct *chosen = NULL;
1902 *ppoints = 0;
1903
1904 - for_each_process(p) {
1905 + do_each_thread(g, p) {
1906 unsigned int points;
1907
1908 + if (!p->mm)
1909 + continue;
1910 if (oom_unkillable_task(p, mem, nodemask))
1911 continue;
1912
1913 @@ -314,22 +317,29 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
1914 if (test_tsk_thread_flag(p, TIF_MEMDIE))
1915 return ERR_PTR(-1UL);
1916
1917 - /*
1918 - * This is in the process of releasing memory so wait for it
1919 - * to finish before killing some other task by mistake.
1920 - *
1921 - * However, if p is the current task, we allow the 'kill' to
1922 - * go ahead if it is exiting: this will simply set TIF_MEMDIE,
1923 - * which will allow it to gain access to memory reserves in
1924 - * the process of exiting and releasing its resources.
1925 - * Otherwise we could get an easy OOM deadlock.
1926 - */
1927 - if (thread_group_empty(p) && (p->flags & PF_EXITING) && p->mm) {
1928 - if (p != current)
1929 - return ERR_PTR(-1UL);
1930 -
1931 - chosen = p;
1932 - *ppoints = 1000;
1933 + if (p->flags & PF_EXITING) {
1934 + /*
1935 + * If p is the current task and is in the process of
1936 + * releasing memory, we allow the "kill" to set
1937 + * TIF_MEMDIE, which will allow it to gain access to
1938 + * memory reserves. Otherwise, it may stall forever.
1939 + *
1940 + * The loop isn't broken here, however, in case other
1941 + * threads are found to have already been oom killed.
1942 + */
1943 + if (p == current) {
1944 + chosen = p;
1945 + *ppoints = 1000;
1946 + } else {
1947 + /*
1948 + * If this task is not being ptraced on exit,
1949 + * then wait for it to finish before killing
1950 + * some other task unnecessarily.
1951 + */
1952 + if (!(task_ptrace(p->group_leader) &
1953 + PT_TRACE_EXIT))
1954 + return ERR_PTR(-1UL);
1955 + }
1956 }
1957
1958 points = oom_badness(p, mem, nodemask, totalpages);
1959 @@ -337,7 +347,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
1960 chosen = p;
1961 *ppoints = points;
1962 }
1963 - }
1964 + } while_each_thread(g, p);
1965
1966 return chosen;
1967 }
1968 @@ -491,6 +501,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
1969 list_for_each_entry(child, &t->children, sibling) {
1970 unsigned int child_points;
1971
1972 + if (child->mm == p->mm)
1973 + continue;
1974 /*
1975 * oom_badness() returns 0 if the thread is unkillable
1976 */
1977 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1978 index cdef1d4..2828037 100644
1979 --- a/mm/page_alloc.c
1980 +++ b/mm/page_alloc.c
1981 @@ -286,7 +286,7 @@ static void bad_page(struct page *page)
1982
1983 /* Don't complain about poisoned pages */
1984 if (PageHWPoison(page)) {
1985 - __ClearPageBuddy(page);
1986 + reset_page_mapcount(page); /* remove PageBuddy */
1987 return;
1988 }
1989
1990 @@ -317,7 +317,7 @@ static void bad_page(struct page *page)
1991 dump_stack();
1992 out:
1993 /* Leave bad fields for debug, except PageBuddy could make trouble */
1994 - __ClearPageBuddy(page);
1995 + reset_page_mapcount(page); /* remove PageBuddy */
1996 add_taint(TAINT_BAD_PAGE);
1997 }
1998
1999 diff --git a/mm/shmem.c b/mm/shmem.c
2000 index 5ee67c9..5ac23d5 100644
2001 --- a/mm/shmem.c
2002 +++ b/mm/shmem.c
2003 @@ -2791,5 +2791,6 @@ int shmem_zero_setup(struct vm_area_struct *vma)
2004 fput(vma->vm_file);
2005 vma->vm_file = file;
2006 vma->vm_ops = &shmem_vm_ops;
2007 + vma->vm_flags |= VM_CAN_NONLINEAR;
2008 return 0;
2009 }
2010 diff --git a/mm/slab.c b/mm/slab.c
2011 index 37961d1..4c6e2e3 100644
2012 --- a/mm/slab.c
2013 +++ b/mm/slab.c
2014 @@ -2288,8 +2288,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
2015 if (ralign < align) {
2016 ralign = align;
2017 }
2018 - /* disable debug if not aligning with REDZONE_ALIGN */
2019 - if (ralign & (__alignof__(unsigned long long) - 1))
2020 + /* disable debug if necessary */
2021 + if (ralign > __alignof__(unsigned long long))
2022 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2023 /*
2024 * 4) Store it.
2025 @@ -2315,8 +2315,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
2026 */
2027 if (flags & SLAB_RED_ZONE) {
2028 /* add space for red zone words */
2029 - cachep->obj_offset += align;
2030 - size += align + sizeof(unsigned long long);
2031 + cachep->obj_offset += sizeof(unsigned long long);
2032 + size += 2 * sizeof(unsigned long long);
2033 }
2034 if (flags & SLAB_STORE_USER) {
2035 /* user store requires one word storage behind the end of
2036 diff --git a/mm/swapfile.c b/mm/swapfile.c
2037 index 0341c57..6d6d28c 100644
2038 --- a/mm/swapfile.c
2039 +++ b/mm/swapfile.c
2040 @@ -2149,8 +2149,13 @@ bad_swap_2:
2041 p->flags = 0;
2042 spin_unlock(&swap_lock);
2043 vfree(swap_map);
2044 - if (swap_file)
2045 + if (swap_file) {
2046 + if (did_down) {
2047 + mutex_unlock(&inode->i_mutex);
2048 + did_down = 0;
2049 + }
2050 filp_close(swap_file, NULL);
2051 + }
2052 out:
2053 if (page && !IS_ERR(page)) {
2054 kunmap(page);
2055 diff --git a/mm/vmscan.c b/mm/vmscan.c
2056 index 6771ea7..3b4a41d 100644
2057 --- a/mm/vmscan.c
2058 +++ b/mm/vmscan.c
2059 @@ -2397,7 +2397,6 @@ loop_again:
2060 * cause too much scanning of the lower zones.
2061 */
2062 for (i = 0; i <= end_zone; i++) {
2063 - int compaction;
2064 struct zone *zone = pgdat->node_zones + i;
2065 int nr_slab;
2066
2067 @@ -2428,24 +2427,9 @@ loop_again:
2068 sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2069 total_scanned += sc.nr_scanned;
2070
2071 - compaction = 0;
2072 - if (order &&
2073 - zone_watermark_ok(zone, 0,
2074 - high_wmark_pages(zone),
2075 - end_zone, 0) &&
2076 - !zone_watermark_ok(zone, order,
2077 - high_wmark_pages(zone),
2078 - end_zone, 0)) {
2079 - compact_zone_order(zone,
2080 - order,
2081 - sc.gfp_mask, false,
2082 - COMPACT_MODE_KSWAPD);
2083 - compaction = 1;
2084 - }
2085 -
2086 if (zone->all_unreclaimable)
2087 continue;
2088 - if (!compaction && nr_slab == 0 &&
2089 + if (nr_slab == 0 &&
2090 !zone_reclaimable(zone))
2091 zone->all_unreclaimable = 1;
2092 /*
2093 diff --git a/net/socket.c b/net/socket.c
2094 index ac2219f..29c7df0 100644
2095 --- a/net/socket.c
2096 +++ b/net/socket.c
2097 @@ -2583,23 +2583,123 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
2098
2099 static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
2100 {
2101 + struct compat_ethtool_rxnfc __user *compat_rxnfc;
2102 + bool convert_in = false, convert_out = false;
2103 + size_t buf_size = ALIGN(sizeof(struct ifreq), 8);
2104 + struct ethtool_rxnfc __user *rxnfc;
2105 struct ifreq __user *ifr;
2106 + u32 rule_cnt = 0, actual_rule_cnt;
2107 + u32 ethcmd;
2108 u32 data;
2109 - void __user *datap;
2110 + int ret;
2111 +
2112 + if (get_user(data, &ifr32->ifr_ifru.ifru_data))
2113 + return -EFAULT;
2114
2115 - ifr = compat_alloc_user_space(sizeof(*ifr));
2116 + compat_rxnfc = compat_ptr(data);
2117
2118 - if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
2119 + if (get_user(ethcmd, &compat_rxnfc->cmd))
2120 return -EFAULT;
2121
2122 - if (get_user(data, &ifr32->ifr_ifru.ifru_data))
2123 + /* Most ethtool structures are defined without padding.
2124 + * Unfortunately struct ethtool_rxnfc is an exception.
2125 + */
2126 + switch (ethcmd) {
2127 + default:
2128 + break;
2129 + case ETHTOOL_GRXCLSRLALL:
2130 + /* Buffer size is variable */
2131 + if (get_user(rule_cnt, &compat_rxnfc->rule_cnt))
2132 + return -EFAULT;
2133 + if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32))
2134 + return -ENOMEM;
2135 + buf_size += rule_cnt * sizeof(u32);
2136 + /* fall through */
2137 + case ETHTOOL_GRXRINGS:
2138 + case ETHTOOL_GRXCLSRLCNT:
2139 + case ETHTOOL_GRXCLSRULE:
2140 + convert_out = true;
2141 + /* fall through */
2142 + case ETHTOOL_SRXCLSRLDEL:
2143 + case ETHTOOL_SRXCLSRLINS:
2144 + buf_size += sizeof(struct ethtool_rxnfc);
2145 + convert_in = true;
2146 + break;
2147 + }
2148 +
2149 + ifr = compat_alloc_user_space(buf_size);
2150 + rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
2151 +
2152 + if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
2153 return -EFAULT;
2154
2155 - datap = compat_ptr(data);
2156 - if (put_user(datap, &ifr->ifr_ifru.ifru_data))
2157 + if (put_user(convert_in ? rxnfc : compat_ptr(data),
2158 + &ifr->ifr_ifru.ifru_data))
2159 return -EFAULT;
2160
2161 - return dev_ioctl(net, SIOCETHTOOL, ifr);
2162 + if (convert_in) {
2163 + /* We expect there to be holes between fs.m_u and
2164 + * fs.ring_cookie and at the end of fs, but nowhere else.
2165 + */
2166 + BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_u) +
2167 + sizeof(compat_rxnfc->fs.m_u) !=
2168 + offsetof(struct ethtool_rxnfc, fs.m_u) +
2169 + sizeof(rxnfc->fs.m_u));
2170 + BUILD_BUG_ON(
2171 + offsetof(struct compat_ethtool_rxnfc, fs.location) -
2172 + offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) !=
2173 + offsetof(struct ethtool_rxnfc, fs.location) -
2174 + offsetof(struct ethtool_rxnfc, fs.ring_cookie));
2175 +
2176 + if (copy_in_user(rxnfc, compat_rxnfc,
2177 + (void *)(&rxnfc->fs.m_u + 1) -
2178 + (void *)rxnfc) ||
2179 + copy_in_user(&rxnfc->fs.ring_cookie,
2180 + &compat_rxnfc->fs.ring_cookie,
2181 + (void *)(&rxnfc->fs.location + 1) -
2182 + (void *)&rxnfc->fs.ring_cookie) ||
2183 + copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
2184 + sizeof(rxnfc->rule_cnt)))
2185 + return -EFAULT;
2186 + }
2187 +
2188 + ret = dev_ioctl(net, SIOCETHTOOL, ifr);
2189 + if (ret)
2190 + return ret;
2191 +
2192 + if (convert_out) {
2193 + if (copy_in_user(compat_rxnfc, rxnfc,
2194 + (const void *)(&rxnfc->fs.m_u + 1) -
2195 + (const void *)rxnfc) ||
2196 + copy_in_user(&compat_rxnfc->fs.ring_cookie,
2197 + &rxnfc->fs.ring_cookie,
2198 + (const void *)(&rxnfc->fs.location + 1) -
2199 + (const void *)&rxnfc->fs.ring_cookie) ||
2200 + copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
2201 + sizeof(rxnfc->rule_cnt)))
2202 + return -EFAULT;
2203 +
2204 + if (ethcmd == ETHTOOL_GRXCLSRLALL) {
2205 + /* As an optimisation, we only copy the actual
2206 + * number of rules that the underlying
2207 + * function returned. Since Mallory might
2208 + * change the rule count in user memory, we
2209 + * check that it is less than the rule count
2210 + * originally given (as the user buffer size),
2211 + * which has been range-checked.
2212 + */
2213 + if (get_user(actual_rule_cnt, &rxnfc->rule_cnt))
2214 + return -EFAULT;
2215 + if (actual_rule_cnt < rule_cnt)
2216 + rule_cnt = actual_rule_cnt;
2217 + if (copy_in_user(&compat_rxnfc->rule_locs[0],
2218 + &rxnfc->rule_locs[0],
2219 + rule_cnt * sizeof(u32)))
2220 + return -EFAULT;
2221 + }
2222 + }
2223 +
2224 + return 0;
2225 }
2226
2227 static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32)
2228 diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
2229 index be96d42..1e336a0 100644
2230 --- a/net/sunrpc/xprtsock.c
2231 +++ b/net/sunrpc/xprtsock.c
2232 @@ -710,6 +710,8 @@ static void xs_reset_transport(struct sock_xprt *transport)
2233 if (sk == NULL)
2234 return;
2235
2236 + transport->srcport = 0;
2237 +
2238 write_lock_bh(&sk->sk_callback_lock);
2239 transport->inet = NULL;
2240 transport->sock = NULL;
2241 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2242 index acd2099..c2eb6a7 100644
2243 --- a/sound/pci/hda/patch_realtek.c
2244 +++ b/sound/pci/hda/patch_realtek.c
2245 @@ -16085,9 +16085,12 @@ static int alc861_auto_create_multi_out_ctls(struct hda_codec *codec,
2246 return err;
2247 } else {
2248 const char *name = pfx;
2249 - if (!name)
2250 + int index = i;
2251 + if (!name) {
2252 name = chname[i];
2253 - err = __alc861_create_out_sw(codec, name, nid, i, 3);
2254 + index = 0;
2255 + }
2256 + err = __alc861_create_out_sw(codec, name, nid, index, 3);
2257 if (err < 0)
2258 return err;
2259 }
2260 @@ -17238,16 +17241,19 @@ static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec,
2261 return err;
2262 } else {
2263 const char *name = pfx;
2264 - if (!name)
2265 + int index = i;
2266 + if (!name) {
2267 name = chname[i];
2268 + index = 0;
2269 + }
2270 err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL,
2271 - name, i,
2272 + name, index,
2273 HDA_COMPOSE_AMP_VAL(nid_v, 3, 0,
2274 HDA_OUTPUT));
2275 if (err < 0)
2276 return err;
2277 err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE,
2278 - name, i,
2279 + name, index,
2280 HDA_COMPOSE_AMP_VAL(nid_s, 3, 2,
2281 HDA_INPUT));
2282 if (err < 0)
2283 @@ -19296,12 +19302,15 @@ static int alc662_auto_create_multi_out_ctls(struct hda_codec *codec,
2284 return err;
2285 } else {
2286 const char *name = pfx;
2287 - if (!name)
2288 + int index = i;
2289 + if (!name) {
2290 name = chname[i];
2291 - err = __alc662_add_vol_ctl(spec, name, nid, i, 3);
2292 + index = 0;
2293 + }
2294 + err = __alc662_add_vol_ctl(spec, name, nid, index, 3);
2295 if (err < 0)
2296 return err;
2297 - err = __alc662_add_sw_ctl(spec, name, mix, i, 3);
2298 + err = __alc662_add_sw_ctl(spec, name, mix, index, 3);
2299 if (err < 0)
2300 return err;
2301 }
2302 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
2303 index 052062d..8566119 100644
2304 --- a/sound/pci/hda/patch_sigmatel.c
2305 +++ b/sound/pci/hda/patch_sigmatel.c
2306 @@ -94,6 +94,7 @@ enum {
2307 STAC_92HD83XXX_REF,
2308 STAC_92HD83XXX_PWR_REF,
2309 STAC_DELL_S14,
2310 + STAC_DELL_E5520M,
2311 STAC_92HD83XXX_HP,
2312 STAC_HP_DV7_4000,
2313 STAC_92HD83XXX_MODELS
2314 @@ -1657,6 +1658,13 @@ static unsigned int dell_s14_pin_configs[10] = {
2315 0x40f000f0, 0x40f000f0,
2316 };
2317
2318 +/* Switch int mic from 0x20 to 0x11 */
2319 +static unsigned int dell_e5520m_pin_configs[10] = {
2320 + 0x04a11020, 0x0421101f, 0x400000f0, 0x90170110,
2321 + 0x23011050, 0x23a1102e, 0x400000f3, 0xd5a30130,
2322 + 0x400000f0, 0x40f000f0,
2323 +};
2324 +
2325 static unsigned int hp_dv7_4000_pin_configs[10] = {
2326 0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110,
2327 0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140,
2328 @@ -1667,6 +1675,7 @@ static unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = {
2329 [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs,
2330 [STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs,
2331 [STAC_DELL_S14] = dell_s14_pin_configs,
2332 + [STAC_DELL_E5520M] = dell_e5520m_pin_configs,
2333 [STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs,
2334 };
2335
2336 @@ -1675,6 +1684,7 @@ static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
2337 [STAC_92HD83XXX_REF] = "ref",
2338 [STAC_92HD83XXX_PWR_REF] = "mic-ref",
2339 [STAC_DELL_S14] = "dell-s14",
2340 + [STAC_DELL_E5520M] = "dell-e5520m",
2341 [STAC_92HD83XXX_HP] = "hp",
2342 [STAC_HP_DV7_4000] = "hp-dv7-4000",
2343 };
2344 @@ -1687,6 +1697,14 @@ static struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
2345 "DFI LanParty", STAC_92HD83XXX_REF),
2346 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba,
2347 "unknown Dell", STAC_DELL_S14),
2348 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x049a,
2349 + "Dell E5520", STAC_DELL_E5520M),
2350 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x049b,
2351 + "Dell E5420", STAC_DELL_E5520M),
2352 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x04eb,
2353 + "Dell E5420m", STAC_DELL_E5520M),
2354 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x04ec,
2355 + "Dell E5520m", STAC_DELL_E5520M),
2356 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600,
2357 "HP", STAC_92HD83XXX_HP),
2358 {} /* terminator */
2359 diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
2360 index 63b0054..acc4579 100644
2361 --- a/sound/pci/hda/patch_via.c
2362 +++ b/sound/pci/hda/patch_via.c
2363 @@ -159,6 +159,7 @@ struct via_spec {
2364 #endif
2365 };
2366
2367 +static enum VIA_HDA_CODEC get_codec_type(struct hda_codec *codec);
2368 static struct via_spec * via_new_spec(struct hda_codec *codec)
2369 {
2370 struct via_spec *spec;
2371 @@ -169,6 +170,10 @@ static struct via_spec * via_new_spec(struct hda_codec *codec)
2372
2373 codec->spec = spec;
2374 spec->codec = codec;
2375 + spec->codec_type = get_codec_type(codec);
2376 + /* VT1708BCE & VT1708S are almost same */
2377 + if (spec->codec_type == VT1708BCE)
2378 + spec->codec_type = VT1708S;
2379 return spec;
2380 }
2381
2382 @@ -1101,6 +1106,7 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol,
2383 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
2384 struct via_spec *spec = codec->spec;
2385 unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2386 + int ret;
2387
2388 if (!spec->mux_nids[adc_idx])
2389 return -EINVAL;
2390 @@ -1109,12 +1115,14 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol,
2391 AC_VERB_GET_POWER_STATE, 0x00) != AC_PWRST_D0)
2392 snd_hda_codec_write(codec, spec->mux_nids[adc_idx], 0,
2393 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
2394 - /* update jack power state */
2395 - set_jack_power_state(codec);
2396
2397 - return snd_hda_input_mux_put(codec, spec->input_mux, ucontrol,
2398 + ret = snd_hda_input_mux_put(codec, spec->input_mux, ucontrol,
2399 spec->mux_nids[adc_idx],
2400 &spec->cur_mux[adc_idx]);
2401 + /* update jack power state */
2402 + set_jack_power_state(codec);
2403 +
2404 + return ret;
2405 }
2406
2407 static int via_independent_hp_info(struct snd_kcontrol *kcontrol,
2408 @@ -1188,8 +1196,16 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol,
2409 /* Get Independent Mode index of headphone pin widget */
2410 spec->hp_independent_mode = spec->hp_independent_mode_index == pinsel
2411 ? 1 : 0;
2412 - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, pinsel);
2413 + if (spec->codec_type == VT1718S)
2414 + snd_hda_codec_write(codec, nid, 0,
2415 + AC_VERB_SET_CONNECT_SEL, pinsel ? 2 : 0);
2416 + else
2417 + snd_hda_codec_write(codec, nid, 0,
2418 + AC_VERB_SET_CONNECT_SEL, pinsel);
2419
2420 + if (spec->codec_type == VT1812)
2421 + snd_hda_codec_write(codec, 0x35, 0,
2422 + AC_VERB_SET_CONNECT_SEL, pinsel);
2423 if (spec->multiout.hp_nid && spec->multiout.hp_nid
2424 != spec->multiout.dac_nids[HDA_FRONT])
2425 snd_hda_codec_setup_stream(codec, spec->multiout.hp_nid,
2426 @@ -1208,6 +1224,8 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol,
2427 activate_ctl(codec, "Headphone Playback Switch",
2428 spec->hp_independent_mode);
2429 }
2430 + /* update jack power state */
2431 + set_jack_power_state(codec);
2432 return 0;
2433 }
2434
2435 @@ -1248,9 +1266,12 @@ static int via_hp_build(struct hda_codec *codec)
2436 break;
2437 }
2438
2439 - nums = snd_hda_get_connections(codec, nid, conn, HDA_MAX_CONNECTIONS);
2440 - if (nums <= 1)
2441 - return 0;
2442 + if (spec->codec_type != VT1708) {
2443 + nums = snd_hda_get_connections(codec, nid,
2444 + conn, HDA_MAX_CONNECTIONS);
2445 + if (nums <= 1)
2446 + return 0;
2447 + }
2448
2449 knew = via_clone_control(spec, &via_hp_mixer[0]);
2450 if (knew == NULL)
2451 @@ -1310,6 +1331,11 @@ static void mute_aa_path(struct hda_codec *codec, int mute)
2452 start_idx = 2;
2453 end_idx = 4;
2454 break;
2455 + case VT1718S:
2456 + nid_mixer = 0x21;
2457 + start_idx = 1;
2458 + end_idx = 3;
2459 + break;
2460 default:
2461 return;
2462 }
2463 @@ -2185,10 +2211,6 @@ static int via_init(struct hda_codec *codec)
2464 for (i = 0; i < spec->num_iverbs; i++)
2465 snd_hda_sequence_write(codec, spec->init_verbs[i]);
2466
2467 - spec->codec_type = get_codec_type(codec);
2468 - if (spec->codec_type == VT1708BCE)
2469 - spec->codec_type = VT1708S; /* VT1708BCE & VT1708S are almost
2470 - same */
2471 /* Lydia Add for EAPD enable */
2472 if (!spec->dig_in_nid) { /* No Digital In connection */
2473 if (spec->dig_in_pin) {
2474 @@ -2438,7 +2460,14 @@ static int vt_auto_create_analog_input_ctls(struct hda_codec *codec,
2475 else
2476 type_idx = 0;
2477 label = hda_get_autocfg_input_label(codec, cfg, i);
2478 - err = via_new_analog_input(spec, label, type_idx, idx, cap_nid);
2479 + if (spec->codec_type == VT1708S ||
2480 + spec->codec_type == VT1702 ||
2481 + spec->codec_type == VT1716S)
2482 + err = via_new_analog_input(spec, label, type_idx,
2483 + idx+1, cap_nid);
2484 + else
2485 + err = via_new_analog_input(spec, label, type_idx,
2486 + idx, cap_nid);
2487 if (err < 0)
2488 return err;
2489 snd_hda_add_imux_item(imux, label, idx, NULL);
2490 diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
2491 index e76847a..48ffd40 100644
2492 --- a/sound/soc/codecs/uda134x.c
2493 +++ b/sound/soc/codecs/uda134x.c
2494 @@ -486,7 +486,8 @@ static struct snd_soc_dai_driver uda134x_dai = {
2495 static int uda134x_soc_probe(struct snd_soc_codec *codec)
2496 {
2497 struct uda134x_priv *uda134x;
2498 - struct uda134x_platform_data *pd = dev_get_drvdata(codec->card->dev);
2499 + struct uda134x_platform_data *pd = codec->card->dev->platform_data;
2500 +
2501 int ret;
2502
2503 printk(KERN_INFO "UDA134X SoC Audio Codec\n");
2504 diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
2505 index 2c09e93..86f1dc4 100644
2506 --- a/sound/soc/samsung/s3c24xx_uda134x.c
2507 +++ b/sound/soc/samsung/s3c24xx_uda134x.c
2508 @@ -226,7 +226,7 @@ static struct snd_soc_ops s3c24xx_uda134x_ops = {
2509 static struct snd_soc_dai_link s3c24xx_uda134x_dai_link = {
2510 .name = "UDA134X",
2511 .stream_name = "UDA134X",
2512 - .codec_name = "uda134x-hifi",
2513 + .codec_name = "uda134x-codec",
2514 .codec_dai_name = "uda134x-hifi",
2515 .cpu_dai_name = "s3c24xx-iis",
2516 .ops = &s3c24xx_uda134x_ops,
2517 @@ -321,6 +321,7 @@ static int s3c24xx_uda134x_probe(struct platform_device *pdev)
2518
2519 platform_set_drvdata(s3c24xx_uda134x_snd_device,
2520 &snd_soc_s3c24xx_uda134x);
2521 + platform_device_add_data(s3c24xx_uda134x_snd_device, &s3c24xx_uda134x, sizeof(s3c24xx_uda134x));
2522 ret = platform_device_add(s3c24xx_uda134x_snd_device);
2523 if (ret) {
2524 printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: Unable to add\n");

  ViewVC Help
Powered by ViewVC 1.1.20