/[linux-patches]/genpatches-2.6/tags/2.6.37-6/1005_linux-2.6.37.6.patch
Gentoo

Contents of /genpatches-2.6/tags/2.6.37-6/1005_linux-2.6.37.6.patch

Parent Directory | Revision Log


Revision 1900 - (show annotations) (download)
Mon Mar 28 22:05:09 2011 UTC (6 years, 5 months ago) by mpagano
File size: 69251 byte(s)
2.6.37-6 release
1 diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
2 index 90a15d2..2130ca6 100644
3 --- a/arch/sh/kernel/ptrace_32.c
4 +++ b/arch/sh/kernel/ptrace_32.c
5 @@ -101,6 +101,8 @@ static int set_single_step(struct task_struct *tsk, unsigned long addr)
6
7 attr = bp->attr;
8 attr.bp_addr = addr;
9 + /* reenable breakpoint */
10 + attr.disabled = false;
11 err = modify_user_hw_breakpoint(bp, &attr);
12 if (unlikely(err))
13 return err;
14 @@ -392,6 +394,9 @@ long arch_ptrace(struct task_struct *child, long request,
15 tmp = 0;
16 } else {
17 unsigned long index;
18 + ret = init_fpu(child);
19 + if (ret)
20 + break;
21 index = addr - offsetof(struct user, fpu);
22 tmp = ((unsigned long *)child->thread.xstate)
23 [index >> 2];
24 @@ -423,6 +428,9 @@ long arch_ptrace(struct task_struct *child, long request,
25 else if (addr >= offsetof(struct user, fpu) &&
26 addr < offsetof(struct user, u_fpvalid)) {
27 unsigned long index;
28 + ret = init_fpu(child);
29 + if (ret)
30 + break;
31 index = addr - offsetof(struct user, fpu);
32 set_stopped_child_used_math(child);
33 ((unsigned long *)child->thread.xstate)
34 diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
35 index 4436eac..c8f9764 100644
36 --- a/arch/sh/kernel/ptrace_64.c
37 +++ b/arch/sh/kernel/ptrace_64.c
38 @@ -403,6 +403,9 @@ long arch_ptrace(struct task_struct *child, long request,
39 else if ((addr >= offsetof(struct user, fpu)) &&
40 (addr < offsetof(struct user, u_fpvalid))) {
41 unsigned long index;
42 + ret = init_fpu(child);
43 + if (ret)
44 + break;
45 index = addr - offsetof(struct user, fpu);
46 tmp = get_fpu_long(child, index);
47 } else if (addr == offsetof(struct user, u_fpvalid)) {
48 @@ -442,6 +445,9 @@ long arch_ptrace(struct task_struct *child, long request,
49 else if ((addr >= offsetof(struct user, fpu)) &&
50 (addr < offsetof(struct user, u_fpvalid))) {
51 unsigned long index;
52 + ret = init_fpu(child);
53 + if (ret)
54 + break;
55 index = addr - offsetof(struct user, fpu);
56 ret = put_fpu_long(child, index, data);
57 }
58 diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
59 index 6b89f5e..0a6c5ef 100644
60 --- a/arch/x86/include/asm/msr-index.h
61 +++ b/arch/x86/include/asm/msr-index.h
62 @@ -36,6 +36,11 @@
63 #define MSR_IA32_PERFCTR1 0x000000c2
64 #define MSR_FSB_FREQ 0x000000cd
65
66 +#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
67 +#define NHM_C3_AUTO_DEMOTE (1UL << 25)
68 +#define NHM_C1_AUTO_DEMOTE (1UL << 26)
69 +#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25)
70 +
71 #define MSR_MTRRcap 0x000000fe
72 #define MSR_IA32_BBL_CR_CTL 0x00000119
73
74 diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
75 index 2d2673c..5655c22 100644
76 --- a/arch/x86/kernel/head64.c
77 +++ b/arch/x86/kernel/head64.c
78 @@ -77,9 +77,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
79 /* Make NULL pointers segfault */
80 zap_identity_mappings();
81
82 - /* Cleanup the over mapped high alias */
83 - cleanup_highmap();
84 -
85 max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;
86
87 for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
88 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
89 index a0f52af..1d9b98e 100644
90 --- a/arch/x86/kernel/setup.c
91 +++ b/arch/x86/kernel/setup.c
92 @@ -297,6 +297,9 @@ static void __init init_gbpages(void)
93 static inline void init_gbpages(void)
94 {
95 }
96 +static void __init cleanup_highmap(void)
97 +{
98 +}
99 #endif
100
101 static void __init reserve_brk(void)
102 @@ -922,6 +925,8 @@ void __init setup_arch(char **cmdline_p)
103 */
104 reserve_brk();
105
106 + cleanup_highmap();
107 +
108 memblock.current_limit = get_max_mapped();
109 memblock_x86_fill();
110
111 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
112 index c0e28a1..0398a73 100644
113 --- a/arch/x86/mm/init.c
114 +++ b/arch/x86/mm/init.c
115 @@ -279,25 +279,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
116 load_cr3(swapper_pg_dir);
117 #endif
118
119 -#ifdef CONFIG_X86_64
120 - if (!after_bootmem && !start) {
121 - pud_t *pud;
122 - pmd_t *pmd;
123 -
124 - mmu_cr4_features = read_cr4();
125 -
126 - /*
127 - * _brk_end cannot change anymore, but it and _end may be
128 - * located on different 2M pages. cleanup_highmap(), however,
129 - * can only consider _end when it runs, so destroy any
130 - * mappings beyond _brk_end here.
131 - */
132 - pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
133 - pmd = pmd_offset(pud, _brk_end - 1);
134 - while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
135 - pmd_clear(pmd);
136 - }
137 -#endif
138 __flush_tlb_all();
139
140 if (!after_bootmem && e820_table_end > e820_table_start)
141 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
142 index c14a542..68f9921 100644
143 --- a/arch/x86/mm/init_64.c
144 +++ b/arch/x86/mm/init_64.c
145 @@ -51,6 +51,7 @@
146 #include <asm/numa.h>
147 #include <asm/cacheflush.h>
148 #include <asm/init.h>
149 +#include <asm/setup.h>
150
151 static int __init parse_direct_gbpages_off(char *arg)
152 {
153 @@ -293,18 +294,18 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
154 * to the compile time generated pmds. This results in invalid pmds up
155 * to the point where we hit the physaddr 0 mapping.
156 *
157 - * We limit the mappings to the region from _text to _end. _end is
158 - * rounded up to the 2MB boundary. This catches the invalid pmds as
159 + * We limit the mappings to the region from _text to _brk_end. _brk_end
160 + * is rounded up to the 2MB boundary. This catches the invalid pmds as
161 * well, as they are located before _text:
162 */
163 void __init cleanup_highmap(void)
164 {
165 unsigned long vaddr = __START_KERNEL_map;
166 - unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
167 + unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
168 + unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
169 pmd_t *pmd = level2_kernel_pgt;
170 - pmd_t *last_pmd = pmd + PTRS_PER_PMD;
171
172 - for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
173 + for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
174 if (pmd_none(*pmd))
175 continue;
176 if (vaddr < (unsigned long) _text || vaddr > end)
177 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
178 index 9ea0dc6..7abdb0c 100644
179 --- a/arch/x86/xen/mmu.c
180 +++ b/arch/x86/xen/mmu.c
181 @@ -2027,9 +2027,6 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
182 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
183 pte_t pte;
184
185 - if (pfn > max_pfn_mapped)
186 - max_pfn_mapped = pfn;
187 -
188 if (!pte_none(pte_page[pteidx]))
189 continue;
190
191 @@ -2087,6 +2084,12 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
192 pud_t *l3;
193 pmd_t *l2;
194
195 + /* max_pfn_mapped is the last pfn mapped in the initial memory
196 + * mappings. Considering that on Xen after the kernel mappings we
197 + * have the mappings of some pages that don't exist in pfn space, we
198 + * set max_pfn_mapped to the last real pfn mapped. */
199 + max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
200 +
201 /* Zap identity mapping */
202 init_level4_pgt[0] = __pgd(0);
203
204 @@ -2191,9 +2194,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
205 initial_kernel_pmd =
206 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
207
208 - max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
209 - xen_start_info->nr_pt_frames * PAGE_SIZE +
210 - 512*1024);
211 + max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
212
213 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
214 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
215 diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
216 index 69ad529..ea5ac2d 100644
217 --- a/drivers/firmware/dcdbas.c
218 +++ b/drivers/firmware/dcdbas.c
219 @@ -268,8 +268,10 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
220 }
221
222 /* generate SMI */
223 + /* inb to force posted write through and make SMI happen now */
224 asm volatile (
225 - "outb %b0,%w1"
226 + "outb %b0,%w1\n"
227 + "inb %w1"
228 : /* no output args */
229 : "a" (smi_cmd->command_code),
230 "d" (smi_cmd->command_address),
231 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
232 index 2baa670..97b2985 100644
233 --- a/drivers/gpu/drm/drm_crtc.c
234 +++ b/drivers/gpu/drm/drm_crtc.c
235 @@ -1073,6 +1073,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
236 uint32_t __user *encoder_id;
237 struct drm_mode_group *mode_group;
238
239 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
240 + return -EINVAL;
241 +
242 mutex_lock(&dev->mode_config.mutex);
243
244 /*
245 @@ -1244,6 +1247,9 @@ int drm_mode_getcrtc(struct drm_device *dev,
246 struct drm_mode_object *obj;
247 int ret = 0;
248
249 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
250 + return -EINVAL;
251 +
252 mutex_lock(&dev->mode_config.mutex);
253
254 obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
255 @@ -1312,6 +1318,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
256 uint64_t __user *prop_values;
257 uint32_t __user *encoder_ptr;
258
259 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
260 + return -EINVAL;
261 +
262 memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
263
264 DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
265 @@ -1431,6 +1440,9 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
266 struct drm_encoder *encoder;
267 int ret = 0;
268
269 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
270 + return -EINVAL;
271 +
272 mutex_lock(&dev->mode_config.mutex);
273 obj = drm_mode_object_find(dev, enc_resp->encoder_id,
274 DRM_MODE_OBJECT_ENCODER);
275 @@ -1486,6 +1498,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
276 int ret = 0;
277 int i;
278
279 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
280 + return -EINVAL;
281 +
282 mutex_lock(&dev->mode_config.mutex);
283 obj = drm_mode_object_find(dev, crtc_req->crtc_id,
284 DRM_MODE_OBJECT_CRTC);
285 @@ -1603,6 +1618,9 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
286 struct drm_crtc *crtc;
287 int ret = 0;
288
289 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
290 + return -EINVAL;
291 +
292 if (!req->flags) {
293 DRM_ERROR("no operation set\n");
294 return -EINVAL;
295 @@ -1667,6 +1685,9 @@ int drm_mode_addfb(struct drm_device *dev,
296 struct drm_framebuffer *fb;
297 int ret = 0;
298
299 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
300 + return -EINVAL;
301 +
302 if ((config->min_width > r->width) || (r->width > config->max_width)) {
303 DRM_ERROR("mode new framebuffer width not within limits\n");
304 return -EINVAL;
305 @@ -1724,6 +1745,9 @@ int drm_mode_rmfb(struct drm_device *dev,
306 int ret = 0;
307 int found = 0;
308
309 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
310 + return -EINVAL;
311 +
312 mutex_lock(&dev->mode_config.mutex);
313 obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
314 /* TODO check that we realy get a framebuffer back. */
315 @@ -1780,6 +1804,9 @@ int drm_mode_getfb(struct drm_device *dev,
316 struct drm_framebuffer *fb;
317 int ret = 0;
318
319 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
320 + return -EINVAL;
321 +
322 mutex_lock(&dev->mode_config.mutex);
323 obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
324 if (!obj) {
325 @@ -1813,6 +1840,9 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
326 int num_clips;
327 int ret = 0;
328
329 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
330 + return -EINVAL;
331 +
332 mutex_lock(&dev->mode_config.mutex);
333 obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
334 if (!obj) {
335 @@ -1996,6 +2026,9 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
336 struct drm_mode_modeinfo *umode = &mode_cmd->mode;
337 int ret = 0;
338
339 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
340 + return -EINVAL;
341 +
342 mutex_lock(&dev->mode_config.mutex);
343
344 obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
345 @@ -2042,6 +2075,9 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
346 struct drm_mode_modeinfo *umode = &mode_cmd->mode;
347 int ret = 0;
348
349 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
350 + return -EINVAL;
351 +
352 mutex_lock(&dev->mode_config.mutex);
353
354 obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
355 @@ -2211,6 +2247,9 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
356 uint64_t __user *values_ptr;
357 uint32_t __user *blob_length_ptr;
358
359 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
360 + return -EINVAL;
361 +
362 mutex_lock(&dev->mode_config.mutex);
363 obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
364 if (!obj) {
365 @@ -2333,6 +2372,9 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
366 int ret = 0;
367 void *blob_ptr;
368
369 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
370 + return -EINVAL;
371 +
372 mutex_lock(&dev->mode_config.mutex);
373 obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
374 if (!obj) {
375 @@ -2393,6 +2435,9 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
376 int ret = -EINVAL;
377 int i;
378
379 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
380 + return -EINVAL;
381 +
382 mutex_lock(&dev->mode_config.mutex);
383
384 obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
385 @@ -2509,6 +2554,9 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
386 int size;
387 int ret = 0;
388
389 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
390 + return -EINVAL;
391 +
392 mutex_lock(&dev->mode_config.mutex);
393 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
394 if (!obj) {
395 @@ -2560,6 +2608,9 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
396 int size;
397 int ret = 0;
398
399 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
400 + return -EINVAL;
401 +
402 mutex_lock(&dev->mode_config.mutex);
403 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
404 if (!obj) {
405 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
406 index ea1c4b0..c3c78ee 100644
407 --- a/drivers/gpu/drm/drm_gem.c
408 +++ b/drivers/gpu/drm/drm_gem.c
409 @@ -498,11 +498,12 @@ EXPORT_SYMBOL(drm_gem_vm_open);
410 void drm_gem_vm_close(struct vm_area_struct *vma)
411 {
412 struct drm_gem_object *obj = vma->vm_private_data;
413 + struct drm_device *dev = obj->dev;
414
415 - mutex_lock(&obj->dev->struct_mutex);
416 + mutex_lock(&dev->struct_mutex);
417 drm_vm_close_locked(vma);
418 drm_gem_object_unreference(obj);
419 - mutex_unlock(&obj->dev->struct_mutex);
420 + mutex_unlock(&dev->struct_mutex);
421 }
422 EXPORT_SYMBOL(drm_gem_vm_close);
423
424 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
425 index 275ec6e..ecbe73d 100644
426 --- a/drivers/gpu/drm/i915/i915_gem.c
427 +++ b/drivers/gpu/drm/i915/i915_gem.c
428 @@ -1748,8 +1748,10 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
429 return;
430
431 spin_lock(&file_priv->mm.lock);
432 - list_del(&request->client_list);
433 - request->file_priv = NULL;
434 + if (request->file_priv) {
435 + list_del(&request->client_list);
436 + request->file_priv = NULL;
437 + }
438 spin_unlock(&file_priv->mm.lock);
439 }
440
441 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
442 index ee14b8a..41262f4 100644
443 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
444 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
445 @@ -921,7 +921,11 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
446 /* adjust pixel clock as needed */
447 adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
448
449 - if (ASIC_IS_AVIVO(rdev))
450 + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
451 + /* TV seems to prefer the legacy algo on some boards */
452 + radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
453 + &ref_div, &post_div);
454 + else if (ASIC_IS_AVIVO(rdev))
455 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
456 &ref_div, &post_div);
457 else
458 diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
459 index 57bee7e..2bf9982 100644
460 --- a/drivers/gpu/drm/radeon/radeon_combios.c
461 +++ b/drivers/gpu/drm/radeon/radeon_combios.c
462 @@ -448,7 +448,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
463
464 bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
465 {
466 - int edid_info;
467 + int edid_info, size;
468 struct edid *edid;
469 unsigned char *raw;
470 edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
471 @@ -456,11 +456,12 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
472 return false;
473
474 raw = rdev->bios + edid_info;
475 - edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL);
476 + size = EDID_LENGTH * (raw[0x7e] + 1);
477 + edid = kmalloc(size, GFP_KERNEL);
478 if (edid == NULL)
479 return false;
480
481 - memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1));
482 + memcpy((unsigned char *)edid, raw, size);
483
484 if (!drm_edid_is_valid(edid)) {
485 kfree(edid);
486 @@ -468,14 +469,24 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
487 }
488
489 rdev->mode_info.bios_hardcoded_edid = edid;
490 + rdev->mode_info.bios_hardcoded_edid_size = size;
491 return true;
492 }
493
494 struct edid *
495 radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
496 {
497 - if (rdev->mode_info.bios_hardcoded_edid)
498 - return rdev->mode_info.bios_hardcoded_edid;
499 + struct edid *edid;
500 +
501 + if (rdev->mode_info.bios_hardcoded_edid) {
502 + edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
503 + if (edid) {
504 + memcpy((unsigned char *)edid,
505 + (unsigned char *)rdev->mode_info.bios_hardcoded_edid,
506 + rdev->mode_info.bios_hardcoded_edid_size);
507 + return edid;
508 + }
509 + }
510 return NULL;
511 }
512
513 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
514 index 8afaf7a..2cc2a68 100644
515 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
516 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
517 @@ -626,6 +626,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector,
518 static enum drm_connector_status
519 radeon_vga_detect(struct drm_connector *connector, bool force)
520 {
521 + struct drm_device *dev = connector->dev;
522 + struct radeon_device *rdev = dev->dev_private;
523 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
524 struct drm_encoder *encoder;
525 struct drm_encoder_helper_funcs *encoder_funcs;
526 @@ -676,6 +678,17 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
527
528 if (ret == connector_status_connected)
529 ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
530 +
531 + /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
532 + * vbios to deal with KVMs. If we have one and are not able to detect a monitor
533 + * by other means, assume the CRT is connected and use that EDID.
534 + */
535 + if ((!rdev->is_atom_bios) &&
536 + (ret == connector_status_disconnected) &&
537 + rdev->mode_info.bios_hardcoded_edid_size) {
538 + ret = connector_status_connected;
539 + }
540 +
541 radeon_connector_update_scratch_regs(connector, ret);
542 return ret;
543 }
544 @@ -787,6 +800,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector)
545 static enum drm_connector_status
546 radeon_dvi_detect(struct drm_connector *connector, bool force)
547 {
548 + struct drm_device *dev = connector->dev;
549 + struct radeon_device *rdev = dev->dev_private;
550 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
551 struct drm_encoder *encoder = NULL;
552 struct drm_encoder_helper_funcs *encoder_funcs;
553 @@ -826,8 +841,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
554 * you don't really know what's connected to which port as both are digital.
555 */
556 if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
557 - struct drm_device *dev = connector->dev;
558 - struct radeon_device *rdev = dev->dev_private;
559 struct drm_connector *list_connector;
560 struct radeon_connector *list_radeon_connector;
561 list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
562 @@ -892,6 +905,19 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
563 ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
564 }
565
566 + /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
567 + * vbios to deal with KVMs. If we have one and are not able to detect a monitor
568 + * by other means, assume the DFP is connected and use that EDID. In most
569 + * cases the DVI port is actually a virtual KVM port connected to the service
570 + * processor.
571 + */
572 + if ((!rdev->is_atom_bios) &&
573 + (ret == connector_status_disconnected) &&
574 + rdev->mode_info.bios_hardcoded_edid_size) {
575 + radeon_connector->use_digital = true;
576 + ret = connector_status_connected;
577 + }
578 +
579 out:
580 /* updated in get modes as well since we need to know if it's analog or digital */
581 radeon_connector_update_scratch_regs(connector, ret);
582 diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
583 index 2615e51..76614a7 100644
584 --- a/drivers/gpu/drm/radeon/radeon_mode.h
585 +++ b/drivers/gpu/drm/radeon/radeon_mode.h
586 @@ -239,6 +239,7 @@ struct radeon_mode_info {
587 struct drm_property *underscan_vborder_property;
588 /* hardcoded DFP edid from BIOS */
589 struct edid *bios_hardcoded_edid;
590 + int bios_hardcoded_edid_size;
591
592 /* pointer to fbdev info structure */
593 struct radeon_fbdev *rfbdev;
594 diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
595 index a1e141e..d3d6ee1 100644
596 --- a/drivers/idle/intel_idle.c
597 +++ b/drivers/idle/intel_idle.c
598 @@ -62,6 +62,7 @@
599 #include <linux/notifier.h>
600 #include <linux/cpu.h>
601 #include <asm/mwait.h>
602 +#include <asm/msr.h>
603
604 #define INTEL_IDLE_VERSION "0.4"
605 #define PREFIX "intel_idle: "
606 @@ -85,6 +86,12 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
607 static struct cpuidle_state *cpuidle_state_table;
608
609 /*
610 + * Hardware C-state auto-demotion may not always be optimal.
611 + * Indicate which enable bits to clear here.
612 + */
613 +static unsigned long long auto_demotion_disable_flags;
614 +
615 +/*
616 * States are indexed by the cstate number,
617 * which is also the index into the MWAIT hint array.
618 * Thus C0 is a dummy.
619 @@ -276,6 +283,15 @@ static struct notifier_block setup_broadcast_notifier = {
620 .notifier_call = setup_broadcast_cpuhp_notify,
621 };
622
623 +static void auto_demotion_disable(void *dummy)
624 +{
625 + unsigned long long msr_bits;
626 +
627 + rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
628 + msr_bits &= ~auto_demotion_disable_flags;
629 + wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
630 +}
631 +
632 /*
633 * intel_idle_probe()
634 */
635 @@ -319,11 +335,17 @@ static int intel_idle_probe(void)
636 case 0x25: /* Westmere */
637 case 0x2C: /* Westmere */
638 cpuidle_state_table = nehalem_cstates;
639 + auto_demotion_disable_flags =
640 + (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE);
641 break;
642
643 case 0x1C: /* 28 - Atom Processor */
644 + cpuidle_state_table = atom_cstates;
645 + break;
646 +
647 case 0x26: /* 38 - Lincroft Atom Processor */
648 cpuidle_state_table = atom_cstates;
649 + auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
650 break;
651
652 case 0x2A: /* SNB */
653 @@ -431,6 +453,8 @@ static int intel_idle_cpuidle_devices_init(void)
654 return -EIO;
655 }
656 }
657 + if (auto_demotion_disable_flags)
658 + smp_call_function(auto_demotion_disable, NULL, 1);
659
660 return 0;
661 }
662 diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
663 index e0c024d..4ec0a98 100644
664 --- a/drivers/input/xen-kbdfront.c
665 +++ b/drivers/input/xen-kbdfront.c
666 @@ -109,7 +109,7 @@ static irqreturn_t input_handler(int rq, void *dev_id)
667 static int __devinit xenkbd_probe(struct xenbus_device *dev,
668 const struct xenbus_device_id *id)
669 {
670 - int ret, i;
671 + int ret, i, abs;
672 struct xenkbd_info *info;
673 struct input_dev *kbd, *ptr;
674
675 @@ -127,6 +127,11 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
676 if (!info->page)
677 goto error_nomem;
678
679 + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0)
680 + abs = 0;
681 + if (abs)
682 + xenbus_printf(XBT_NIL, dev->nodename, "request-abs-pointer", "1");
683 +
684 /* keyboard */
685 kbd = input_allocate_device();
686 if (!kbd)
687 @@ -136,11 +141,12 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
688 kbd->id.bustype = BUS_PCI;
689 kbd->id.vendor = 0x5853;
690 kbd->id.product = 0xffff;
691 - kbd->evbit[0] = BIT(EV_KEY);
692 +
693 + __set_bit(EV_KEY, kbd->evbit);
694 for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
695 - set_bit(i, kbd->keybit);
696 + __set_bit(i, kbd->keybit);
697 for (i = KEY_OK; i < KEY_MAX; i++)
698 - set_bit(i, kbd->keybit);
699 + __set_bit(i, kbd->keybit);
700
701 ret = input_register_device(kbd);
702 if (ret) {
703 @@ -159,12 +165,20 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
704 ptr->id.bustype = BUS_PCI;
705 ptr->id.vendor = 0x5853;
706 ptr->id.product = 0xfffe;
707 - ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
708 +
709 + if (abs) {
710 + __set_bit(EV_ABS, ptr->evbit);
711 + input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
712 + input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
713 + } else {
714 + input_set_capability(ptr, EV_REL, REL_X);
715 + input_set_capability(ptr, EV_REL, REL_Y);
716 + }
717 + input_set_capability(ptr, EV_REL, REL_WHEEL);
718 +
719 + __set_bit(EV_KEY, ptr->evbit);
720 for (i = BTN_LEFT; i <= BTN_TASK; i++)
721 - set_bit(i, ptr->keybit);
722 - ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL);
723 - input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
724 - input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
725 + __set_bit(i, ptr->keybit);
726
727 ret = input_register_device(ptr);
728 if (ret) {
729 @@ -271,7 +285,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
730 enum xenbus_state backend_state)
731 {
732 struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
733 - int ret, val;
734 + int val;
735
736 switch (backend_state) {
737 case XenbusStateInitialising:
738 @@ -284,17 +298,6 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
739
740 case XenbusStateInitWait:
741 InitWait:
742 - ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
743 - "feature-abs-pointer", "%d", &val);
744 - if (ret < 0)
745 - val = 0;
746 - if (val) {
747 - ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
748 - "request-abs-pointer", "1");
749 - if (ret)
750 - printk(KERN_WARNING
751 - "xenkbd: can't request abs-pointer");
752 - }
753 xenbus_switch_state(dev, XenbusStateConnected);
754 break;
755
756 diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
757 index a1e9dfb..6459b8c 100644
758 --- a/drivers/media/video/uvc/uvc_driver.c
759 +++ b/drivers/media/video/uvc/uvc_driver.c
760 @@ -1264,6 +1264,14 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
761
762 break;
763
764 + case UVC_OTT_VENDOR_SPECIFIC:
765 + case UVC_OTT_DISPLAY:
766 + case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
767 + if (uvc_trace_param & UVC_TRACE_PROBE)
768 + printk(" OT %d", entity->id);
769 +
770 + break;
771 +
772 case UVC_TT_STREAMING:
773 if (UVC_ENTITY_IS_ITERM(entity)) {
774 if (uvc_trace_param & UVC_TRACE_PROBE)
775 diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
776 index 5673d67..545c029 100644
777 --- a/drivers/media/video/uvc/uvc_video.c
778 +++ b/drivers/media/video/uvc/uvc_video.c
779 @@ -89,15 +89,19 @@ int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
780 static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
781 struct uvc_streaming_control *ctrl)
782 {
783 - struct uvc_format *format;
784 + struct uvc_format *format = NULL;
785 struct uvc_frame *frame = NULL;
786 unsigned int i;
787
788 - if (ctrl->bFormatIndex <= 0 ||
789 - ctrl->bFormatIndex > stream->nformats)
790 - return;
791 + for (i = 0; i < stream->nformats; ++i) {
792 + if (stream->format[i].index == ctrl->bFormatIndex) {
793 + format = &stream->format[i];
794 + break;
795 + }
796 + }
797
798 - format = &stream->format[ctrl->bFormatIndex - 1];
799 + if (format == NULL)
800 + return;
801
802 for (i = 0; i < format->nframes; ++i) {
803 if (format->frame[i].bFrameIndex == ctrl->bFrameIndex) {
804 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
805 index cb23aa2..e610cfe 100644
806 --- a/drivers/pci/hotplug/acpiphp_glue.c
807 +++ b/drivers/pci/hotplug/acpiphp_glue.c
808 @@ -212,6 +212,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
809
810 pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
811 if (pdev) {
812 + pdev->current_state = PCI_D0;
813 slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
814 pci_dev_put(pdev);
815 }
816 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
817 index 4ab49d4..30bb8d0 100644
818 --- a/drivers/usb/class/cdc-acm.c
819 +++ b/drivers/usb/class/cdc-acm.c
820 @@ -297,6 +297,8 @@ static void acm_ctrl_irq(struct urb *urb)
821 if (!ACM_READY(acm))
822 goto exit;
823
824 + usb_mark_last_busy(acm->dev);
825 +
826 data = (unsigned char *)(dr + 1);
827 switch (dr->bNotificationType) {
828 case USB_CDC_NOTIFY_NETWORK_CONNECTION:
829 @@ -336,7 +338,6 @@ static void acm_ctrl_irq(struct urb *urb)
830 break;
831 }
832 exit:
833 - usb_mark_last_busy(acm->dev);
834 retval = usb_submit_urb(urb, GFP_ATOMIC);
835 if (retval)
836 dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with "
837 @@ -533,6 +534,8 @@ static void acm_softint(struct work_struct *work)
838 if (!ACM_READY(acm))
839 return;
840 tty = tty_port_tty_get(&acm->port);
841 + if (!tty)
842 + return;
843 tty_wakeup(tty);
844 tty_kref_put(tty);
845 }
846 @@ -646,8 +649,10 @@ static void acm_port_down(struct acm *acm)
847 usb_kill_urb(acm->ctrlurb);
848 for (i = 0; i < ACM_NW; i++)
849 usb_kill_urb(acm->wb[i].urb);
850 + tasklet_disable(&acm->urb_task);
851 for (i = 0; i < nr; i++)
852 usb_kill_urb(acm->ru[i].urb);
853 + tasklet_enable(&acm->urb_task);
854 acm->control->needs_remote_wakeup = 0;
855 usb_autopm_put_interface(acm->control);
856 }
857 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
858 index 6ee4451..d0cd8b6 100644
859 --- a/drivers/usb/class/cdc-wdm.c
860 +++ b/drivers/usb/class/cdc-wdm.c
861 @@ -281,7 +281,7 @@ static void cleanup(struct wdm_device *desc)
862 desc->sbuf,
863 desc->validity->transfer_dma);
864 usb_free_coherent(interface_to_usbdev(desc->intf),
865 - desc->wMaxCommand,
866 + desc->bMaxPacketSize0,
867 desc->inbuf,
868 desc->response->transfer_dma);
869 kfree(desc->orq);
870 diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
871 index a7131ad..37518df 100644
872 --- a/drivers/usb/core/devio.c
873 +++ b/drivers/usb/core/devio.c
874 @@ -802,7 +802,7 @@ static int proc_control(struct dev_state *ps, void __user *arg)
875 tbuf, ctrl.wLength, tmo);
876 usb_lock_device(dev);
877 snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE,
878 - tbuf, i);
879 + tbuf, max(i, 0));
880 if ((i > 0) && ctrl.wLength) {
881 if (copy_to_user(ctrl.data, tbuf, i)) {
882 free_page((unsigned long)tbuf);
883 diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
884 index 233c288..5add8b5 100644
885 --- a/drivers/usb/host/ehci-q.c
886 +++ b/drivers/usb/host/ehci-q.c
887 @@ -315,7 +315,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
888 int stopped;
889 unsigned count = 0;
890 u8 state;
891 - const __le32 halt = HALT_BIT(ehci);
892 struct ehci_qh_hw *hw = qh->hw;
893
894 if (unlikely (list_empty (&qh->qtd_list)))
895 @@ -422,7 +421,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
896 && !(qtd->hw_alt_next
897 & EHCI_LIST_END(ehci))) {
898 stopped = 1;
899 - goto halt;
900 }
901
902 /* stop scanning when we reach qtds the hc is using */
903 @@ -456,16 +454,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
904 */
905 ehci_clear_tt_buffer(ehci, qh, urb, token);
906 }
907 -
908 - /* force halt for unlinked or blocked qh, so we'll
909 - * patch the qh later and so that completions can't
910 - * activate it while we "know" it's stopped.
911 - */
912 - if ((halt & hw->hw_token) == 0) {
913 -halt:
914 - hw->hw_token |= halt;
915 - wmb ();
916 - }
917 }
918
919 /* unless we already know the urb's status, collect qtd status
920 diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
921 index 4ff2158..9727daa 100644
922 --- a/drivers/usb/misc/uss720.c
923 +++ b/drivers/usb/misc/uss720.c
924 @@ -177,12 +177,11 @@ static struct uss720_async_request *submit_async_request(struct parport_uss720_p
925 spin_lock_irqsave(&priv->asynclock, flags);
926 list_add_tail(&rq->asynclist, &priv->asynclist);
927 spin_unlock_irqrestore(&priv->asynclock, flags);
928 + kref_get(&rq->ref_count);
929 ret = usb_submit_urb(rq->urb, mem_flags);
930 - if (!ret) {
931 - kref_get(&rq->ref_count);
932 + if (!ret)
933 return rq;
934 - }
935 - kref_put(&rq->ref_count, destroy_async);
936 + destroy_async(&rq->ref_count);
937 err("submit_async_request submit_urb failed with %d", ret);
938 return NULL;
939 }
940 diff --git a/drivers/video/console/tileblit.c b/drivers/video/console/tileblit.c
941 index 0056a41..15e8e1a 100644
942 --- a/drivers/video/console/tileblit.c
943 +++ b/drivers/video/console/tileblit.c
944 @@ -83,7 +83,7 @@ static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode,
945 int softback_lines, int fg, int bg)
946 {
947 struct fb_tilecursor cursor;
948 - int use_sw = (vc->vc_cursor_type & 0x01);
949 + int use_sw = (vc->vc_cursor_type & 0x10);
950
951 cursor.sx = vc->vc_x;
952 cursor.sy = vc->vc_y;
953 diff --git a/fs/aio.c b/fs/aio.c
954 index 8c8f6c5..72b3eef 100644
955 --- a/fs/aio.c
956 +++ b/fs/aio.c
957 @@ -512,7 +512,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
958 ctx->reqs_active--;
959
960 if (unlikely(!ctx->reqs_active && ctx->dead))
961 - wake_up(&ctx->wait);
962 + wake_up_all(&ctx->wait);
963 }
964
965 static void aio_fput_routine(struct work_struct *data)
966 @@ -1233,7 +1233,7 @@ static void io_destroy(struct kioctx *ioctx)
967 * by other CPUs at this point. Right now, we rely on the
968 * locking done by the above calls to ensure this consistency.
969 */
970 - wake_up(&ioctx->wait);
971 + wake_up_all(&ioctx->wait);
972 put_ioctx(ioctx); /* once for the lookup */
973 }
974
975 diff --git a/fs/dcache.c b/fs/dcache.c
976 index 23702a9..119d489 100644
977 --- a/fs/dcache.c
978 +++ b/fs/dcache.c
979 @@ -1201,9 +1201,12 @@ struct dentry *d_obtain_alias(struct inode *inode)
980 spin_unlock(&tmp->d_lock);
981
982 spin_unlock(&dcache_lock);
983 + security_d_instantiate(tmp, inode);
984 return tmp;
985
986 out_iput:
987 + if (res && !IS_ERR(res))
988 + security_d_instantiate(res, inode);
989 iput(inode);
990 return res;
991 }
992 diff --git a/fs/ext3/super.c b/fs/ext3/super.c
993 index acf8695..1dc91dd 100644
994 --- a/fs/ext3/super.c
995 +++ b/fs/ext3/super.c
996 @@ -1440,6 +1440,13 @@ static void ext3_orphan_cleanup (struct super_block * sb,
997 return;
998 }
999
1000 + /* Check if feature set allows readwrite operations */
1001 + if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) {
1002 + ext3_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
1003 + "unknown ROCOMPAT features");
1004 + return;
1005 + }
1006 +
1007 if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
1008 if (es->s_last_orphan)
1009 jbd_debug(1, "Errors on filesystem, "
1010 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1011 index 851eac3..288f7de 100644
1012 --- a/fs/ext4/super.c
1013 +++ b/fs/ext4/super.c
1014 @@ -75,6 +75,7 @@ static void ext4_write_super(struct super_block *sb);
1015 static int ext4_freeze(struct super_block *sb);
1016 static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
1017 const char *dev_name, void *data);
1018 +static int ext4_feature_set_ok(struct super_block *sb, int readonly);
1019 static void ext4_destroy_lazyinit_thread(void);
1020 static void ext4_unregister_li_request(struct super_block *sb);
1021 static void ext4_clear_request_list(void);
1022 @@ -2107,6 +2108,13 @@ static void ext4_orphan_cleanup(struct super_block *sb,
1023 return;
1024 }
1025
1026 + /* Check if feature set would not allow a r/w mount */
1027 + if (!ext4_feature_set_ok(sb, 0)) {
1028 + ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
1029 + "unknown ROCOMPAT features");
1030 + return;
1031 + }
1032 +
1033 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
1034 if (es->s_last_orphan)
1035 jbd_debug(1, "Errors on filesystem, "
1036 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
1037 index 10d648e..76ffb14 100644
1038 --- a/fs/nfs/write.c
1039 +++ b/fs/nfs/write.c
1040 @@ -1214,13 +1214,17 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1041 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1042 static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
1043 {
1044 + int ret;
1045 +
1046 if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
1047 return 1;
1048 - if (may_wait && !out_of_line_wait_on_bit_lock(&nfsi->flags,
1049 - NFS_INO_COMMIT, nfs_wait_bit_killable,
1050 - TASK_KILLABLE))
1051 - return 1;
1052 - return 0;
1053 + if (!may_wait)
1054 + return 0;
1055 + ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
1056 + NFS_INO_COMMIT,
1057 + nfs_wait_bit_killable,
1058 + TASK_KILLABLE);
1059 + return (ret < 0) ? ret : 1;
1060 }
1061
1062 static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
1063 @@ -1394,9 +1398,10 @@ int nfs_commit_inode(struct inode *inode, int how)
1064 {
1065 LIST_HEAD(head);
1066 int may_wait = how & FLUSH_SYNC;
1067 - int res = 0;
1068 + int res;
1069
1070 - if (!nfs_commit_set_lock(NFS_I(inode), may_wait))
1071 + res = nfs_commit_set_lock(NFS_I(inode), may_wait);
1072 + if (res <= 0)
1073 goto out_mark_dirty;
1074 spin_lock(&inode->i_lock);
1075 res = nfs_scan_commit(inode, &head, 0, 0);
1076 @@ -1405,12 +1410,14 @@ int nfs_commit_inode(struct inode *inode, int how)
1077 int error = nfs_commit_list(inode, &head, how);
1078 if (error < 0)
1079 return error;
1080 - if (may_wait)
1081 - wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT,
1082 - nfs_wait_bit_killable,
1083 - TASK_KILLABLE);
1084 - else
1085 + if (!may_wait)
1086 goto out_mark_dirty;
1087 + error = wait_on_bit(&NFS_I(inode)->flags,
1088 + NFS_INO_COMMIT,
1089 + nfs_wait_bit_killable,
1090 + TASK_KILLABLE);
1091 + if (error < 0)
1092 + return error;
1093 } else
1094 nfs_commit_clear_lock(NFS_I(inode));
1095 return res;
1096 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
1097 index 0cdfd02..ba16492 100644
1098 --- a/fs/nfsd/nfs4proc.c
1099 +++ b/fs/nfsd/nfs4proc.c
1100 @@ -954,8 +954,8 @@ typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *,
1101 void *);
1102 enum nfsd4_op_flags {
1103 ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */
1104 - ALLOWED_ON_ABSENT_FS = 2 << 0, /* ops processed on absent fs */
1105 - ALLOWED_AS_FIRST_OP = 3 << 0, /* ops reqired first in compound */
1106 + ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */
1107 + ALLOWED_AS_FIRST_OP = 1 << 2, /* ops reqired first in compound */
1108 };
1109
1110 struct nfsd4_operation {
1111 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
1112 index 116cab9..b517814 100644
1113 --- a/fs/nfsd/nfs4state.c
1114 +++ b/fs/nfsd/nfs4state.c
1115 @@ -327,64 +327,6 @@ static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE];
1116 static struct list_head client_lru;
1117 static struct list_head close_lru;
1118
1119 -static void unhash_generic_stateid(struct nfs4_stateid *stp)
1120 -{
1121 - list_del(&stp->st_hash);
1122 - list_del(&stp->st_perfile);
1123 - list_del(&stp->st_perstateowner);
1124 -}
1125 -
1126 -static void free_generic_stateid(struct nfs4_stateid *stp)
1127 -{
1128 - put_nfs4_file(stp->st_file);
1129 - kmem_cache_free(stateid_slab, stp);
1130 -}
1131 -
1132 -static void release_lock_stateid(struct nfs4_stateid *stp)
1133 -{
1134 - struct file *file;
1135 -
1136 - unhash_generic_stateid(stp);
1137 - file = find_any_file(stp->st_file);
1138 - if (file)
1139 - locks_remove_posix(file, (fl_owner_t)stp->st_stateowner);
1140 - free_generic_stateid(stp);
1141 -}
1142 -
1143 -static void unhash_lockowner(struct nfs4_stateowner *sop)
1144 -{
1145 - struct nfs4_stateid *stp;
1146 -
1147 - list_del(&sop->so_idhash);
1148 - list_del(&sop->so_strhash);
1149 - list_del(&sop->so_perstateid);
1150 - while (!list_empty(&sop->so_stateids)) {
1151 - stp = list_first_entry(&sop->so_stateids,
1152 - struct nfs4_stateid, st_perstateowner);
1153 - release_lock_stateid(stp);
1154 - }
1155 -}
1156 -
1157 -static void release_lockowner(struct nfs4_stateowner *sop)
1158 -{
1159 - unhash_lockowner(sop);
1160 - nfs4_put_stateowner(sop);
1161 -}
1162 -
1163 -static void
1164 -release_stateid_lockowners(struct nfs4_stateid *open_stp)
1165 -{
1166 - struct nfs4_stateowner *lock_sop;
1167 -
1168 - while (!list_empty(&open_stp->st_lockowners)) {
1169 - lock_sop = list_entry(open_stp->st_lockowners.next,
1170 - struct nfs4_stateowner, so_perstateid);
1171 - /* list_del(&open_stp->st_lockowners); */
1172 - BUG_ON(lock_sop->so_is_open_owner);
1173 - release_lockowner(lock_sop);
1174 - }
1175 -}
1176 -
1177 /*
1178 * We store the NONE, READ, WRITE, and BOTH bits separately in the
1179 * st_{access,deny}_bmap field of the stateid, in order to track not
1180 @@ -457,13 +399,71 @@ static int nfs4_access_bmap_to_omode(struct nfs4_stateid *stp)
1181 return nfs4_access_to_omode(access);
1182 }
1183
1184 -static void release_open_stateid(struct nfs4_stateid *stp)
1185 +static void unhash_generic_stateid(struct nfs4_stateid *stp)
1186 +{
1187 + list_del(&stp->st_hash);
1188 + list_del(&stp->st_perfile);
1189 + list_del(&stp->st_perstateowner);
1190 +}
1191 +
1192 +static void free_generic_stateid(struct nfs4_stateid *stp)
1193 {
1194 int oflag = nfs4_access_bmap_to_omode(stp);
1195
1196 + nfs4_file_put_access(stp->st_file, oflag);
1197 + put_nfs4_file(stp->st_file);
1198 + kmem_cache_free(stateid_slab, stp);
1199 +}
1200 +
1201 +static void release_lock_stateid(struct nfs4_stateid *stp)
1202 +{
1203 + struct file *file;
1204 +
1205 + unhash_generic_stateid(stp);
1206 + file = find_any_file(stp->st_file);
1207 + if (file)
1208 + locks_remove_posix(file, (fl_owner_t)stp->st_stateowner);
1209 + free_generic_stateid(stp);
1210 +}
1211 +
1212 +static void unhash_lockowner(struct nfs4_stateowner *sop)
1213 +{
1214 + struct nfs4_stateid *stp;
1215 +
1216 + list_del(&sop->so_idhash);
1217 + list_del(&sop->so_strhash);
1218 + list_del(&sop->so_perstateid);
1219 + while (!list_empty(&sop->so_stateids)) {
1220 + stp = list_first_entry(&sop->so_stateids,
1221 + struct nfs4_stateid, st_perstateowner);
1222 + release_lock_stateid(stp);
1223 + }
1224 +}
1225 +
1226 +static void release_lockowner(struct nfs4_stateowner *sop)
1227 +{
1228 + unhash_lockowner(sop);
1229 + nfs4_put_stateowner(sop);
1230 +}
1231 +
1232 +static void
1233 +release_stateid_lockowners(struct nfs4_stateid *open_stp)
1234 +{
1235 + struct nfs4_stateowner *lock_sop;
1236 +
1237 + while (!list_empty(&open_stp->st_lockowners)) {
1238 + lock_sop = list_entry(open_stp->st_lockowners.next,
1239 + struct nfs4_stateowner, so_perstateid);
1240 + /* list_del(&open_stp->st_lockowners); */
1241 + BUG_ON(lock_sop->so_is_open_owner);
1242 + release_lockowner(lock_sop);
1243 + }
1244 +}
1245 +
1246 +static void release_open_stateid(struct nfs4_stateid *stp)
1247 +{
1248 unhash_generic_stateid(stp);
1249 release_stateid_lockowners(stp);
1250 - nfs4_file_put_access(stp->st_file, oflag);
1251 free_generic_stateid(stp);
1252 }
1253
1254 @@ -3661,6 +3661,7 @@ alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struc
1255 stp->st_stateid.si_stateownerid = sop->so_id;
1256 stp->st_stateid.si_fileid = fp->fi_id;
1257 stp->st_stateid.si_generation = 0;
1258 + stp->st_access_bmap = 0;
1259 stp->st_deny_bmap = open_stp->st_deny_bmap;
1260 stp->st_openstp = open_stp;
1261
1262 @@ -3675,6 +3676,17 @@ check_lock_length(u64 offset, u64 length)
1263 LOFF_OVERFLOW(offset, length)));
1264 }
1265
1266 +static void get_lock_access(struct nfs4_stateid *lock_stp, u32 access)
1267 +{
1268 + struct nfs4_file *fp = lock_stp->st_file;
1269 + int oflag = nfs4_access_to_omode(access);
1270 +
1271 + if (test_bit(access, &lock_stp->st_access_bmap))
1272 + return;
1273 + nfs4_file_get_access(fp, oflag);
1274 + __set_bit(access, &lock_stp->st_access_bmap);
1275 +}
1276 +
1277 /*
1278 * LOCK operation
1279 */
1280 @@ -3691,7 +3703,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1281 struct file_lock conflock;
1282 __be32 status = 0;
1283 unsigned int strhashval;
1284 - unsigned int cmd;
1285 int err;
1286
1287 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
1288 @@ -3773,22 +3784,18 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1289 switch (lock->lk_type) {
1290 case NFS4_READ_LT:
1291 case NFS4_READW_LT:
1292 - if (find_readable_file(lock_stp->st_file)) {
1293 - nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_READ);
1294 - filp = find_readable_file(lock_stp->st_file);
1295 - }
1296 + filp = find_readable_file(lock_stp->st_file);
1297 + if (filp)
1298 + get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
1299 file_lock.fl_type = F_RDLCK;
1300 - cmd = F_SETLK;
1301 - break;
1302 + break;
1303 case NFS4_WRITE_LT:
1304 case NFS4_WRITEW_LT:
1305 - if (find_writeable_file(lock_stp->st_file)) {
1306 - nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_WRITE);
1307 - filp = find_writeable_file(lock_stp->st_file);
1308 - }
1309 + filp = find_writeable_file(lock_stp->st_file);
1310 + if (filp)
1311 + get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
1312 file_lock.fl_type = F_WRLCK;
1313 - cmd = F_SETLK;
1314 - break;
1315 + break;
1316 default:
1317 status = nfserr_inval;
1318 goto out;
1319 @@ -3812,7 +3819,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1320 * Note: locks.c uses the BKL to protect the inode's lock list.
1321 */
1322
1323 - err = vfs_lock_file(filp, cmd, &file_lock, &conflock);
1324 + err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock);
1325 switch (-err) {
1326 case 0: /* success! */
1327 update_stateid(&lock_stp->st_stateid);
1328 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
1329 index 5cf8fa3..883fc4d 100644
1330 --- a/fs/nfsd/nfs4xdr.c
1331 +++ b/fs/nfsd/nfs4xdr.c
1332 @@ -1107,7 +1107,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
1333
1334 u32 dummy;
1335 char *machine_name;
1336 - int i, j;
1337 + int i;
1338 int nr_secflavs;
1339
1340 READ_BUF(16);
1341 @@ -1180,8 +1180,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
1342 READ_BUF(4);
1343 READ32(dummy);
1344 READ_BUF(dummy * 4);
1345 - for (j = 0; j < dummy; ++j)
1346 - READ32(dummy);
1347 break;
1348 case RPC_AUTH_GSS:
1349 dprintk("RPC_AUTH_GSS callback secflavor "
1350 @@ -1197,7 +1195,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
1351 READ_BUF(4);
1352 READ32(dummy);
1353 READ_BUF(dummy);
1354 - p += XDR_QUADLEN(dummy);
1355 break;
1356 default:
1357 dprintk("Illegal callback secflavor\n");
1358 diff --git a/fs/proc/array.c b/fs/proc/array.c
1359 index 3d88fe1..9e5f430 100644
1360 --- a/fs/proc/array.c
1361 +++ b/fs/proc/array.c
1362 @@ -489,8 +489,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
1363 vsize,
1364 mm ? get_mm_rss(mm) : 0,
1365 rsslim,
1366 - mm ? mm->start_code : 0,
1367 - mm ? mm->end_code : 0,
1368 + mm ? (permitted ? mm->start_code : 1) : 0,
1369 + mm ? (permitted ? mm->end_code : 1) : 0,
1370 (permitted && mm) ? mm->start_stack : 0,
1371 esp,
1372 eip,
1373 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
1374 index c126c83..ec93041 100644
1375 --- a/fs/proc/task_mmu.c
1376 +++ b/fs/proc/task_mmu.c
1377 @@ -248,8 +248,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
1378 const char *name = arch_vma_name(vma);
1379 if (!name) {
1380 if (mm) {
1381 - if (vma->vm_start <= mm->start_brk &&
1382 - vma->vm_end >= mm->brk) {
1383 + if (vma->vm_start <= mm->brk &&
1384 + vma->vm_end >= mm->start_brk) {
1385 name = "[heap]";
1386 } else if (vma->vm_start <= mm->start_stack &&
1387 vma->vm_end >= mm->start_stack) {
1388 diff --git a/fs/super.c b/fs/super.c
1389 index ca69615..302356f 100644
1390 --- a/fs/super.c
1391 +++ b/fs/super.c
1392 @@ -70,6 +70,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
1393 #else
1394 INIT_LIST_HEAD(&s->s_files);
1395 #endif
1396 + s->s_bdi = &default_backing_dev_info;
1397 INIT_LIST_HEAD(&s->s_instances);
1398 INIT_HLIST_HEAD(&s->s_anon);
1399 INIT_LIST_HEAD(&s->s_inodes);
1400 @@ -996,6 +997,7 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
1401 }
1402 BUG_ON(!mnt->mnt_sb);
1403 WARN_ON(!mnt->mnt_sb->s_bdi);
1404 + WARN_ON(mnt->mnt_sb->s_bdi == &default_backing_dev_info);
1405 mnt->mnt_sb->s_flags |= MS_BORN;
1406
1407 error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata);
1408 diff --git a/fs/sync.c b/fs/sync.c
1409 index ba76b96..412dc89 100644
1410 --- a/fs/sync.c
1411 +++ b/fs/sync.c
1412 @@ -33,7 +33,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
1413 * This should be safe, as we require bdi backing to actually
1414 * write out data in the first place
1415 */
1416 - if (!sb->s_bdi || sb->s_bdi == &noop_backing_dev_info)
1417 + if (sb->s_bdi == &noop_backing_dev_info)
1418 return 0;
1419
1420 if (sb->s_qcop && sb->s_qcop->quota_sync)
1421 @@ -79,7 +79,7 @@ EXPORT_SYMBOL_GPL(sync_filesystem);
1422
1423 static void sync_one_sb(struct super_block *sb, void *arg)
1424 {
1425 - if (!(sb->s_flags & MS_RDONLY) && sb->s_bdi)
1426 + if (!(sb->s_flags & MS_RDONLY))
1427 __sync_filesystem(sb, *(int *)arg);
1428 }
1429 /*
1430 diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
1431 index 6628a50..9ebb1b8 100644
1432 --- a/include/linux/ethtool.h
1433 +++ b/include/linux/ethtool.h
1434 @@ -13,6 +13,9 @@
1435 #ifndef _LINUX_ETHTOOL_H
1436 #define _LINUX_ETHTOOL_H
1437
1438 +#ifdef __KERNEL__
1439 +#include <linux/compat.h>
1440 +#endif
1441 #include <linux/types.h>
1442 #include <linux/if_ether.h>
1443
1444 @@ -449,6 +452,37 @@ struct ethtool_rxnfc {
1445 __u32 rule_locs[0];
1446 };
1447
1448 +#ifdef __KERNEL__
1449 +#ifdef CONFIG_COMPAT
1450 +
1451 +struct compat_ethtool_rx_flow_spec {
1452 + u32 flow_type;
1453 + union {
1454 + struct ethtool_tcpip4_spec tcp_ip4_spec;
1455 + struct ethtool_tcpip4_spec udp_ip4_spec;
1456 + struct ethtool_tcpip4_spec sctp_ip4_spec;
1457 + struct ethtool_ah_espip4_spec ah_ip4_spec;
1458 + struct ethtool_ah_espip4_spec esp_ip4_spec;
1459 + struct ethtool_usrip4_spec usr_ip4_spec;
1460 + struct ethhdr ether_spec;
1461 + u8 hdata[72];
1462 + } h_u, m_u;
1463 + compat_u64 ring_cookie;
1464 + u32 location;
1465 +};
1466 +
1467 +struct compat_ethtool_rxnfc {
1468 + u32 cmd;
1469 + u32 flow_type;
1470 + compat_u64 data;
1471 + struct compat_ethtool_rx_flow_spec fs;
1472 + u32 rule_cnt;
1473 + u32 rule_locs[0];
1474 +};
1475 +
1476 +#endif /* CONFIG_COMPAT */
1477 +#endif /* __KERNEL__ */
1478 +
1479 /**
1480 * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection
1481 * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR
1482 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
1483 index 66a416b..a328a75 100644
1484 --- a/kernel/cgroup.c
1485 +++ b/kernel/cgroup.c
1486 @@ -1791,10 +1791,8 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1487
1488 /* Update the css_set linked lists if we're using them */
1489 write_lock(&css_set_lock);
1490 - if (!list_empty(&tsk->cg_list)) {
1491 - list_del(&tsk->cg_list);
1492 - list_add(&tsk->cg_list, &newcg->tasks);
1493 - }
1494 + if (!list_empty(&tsk->cg_list))
1495 + list_move(&tsk->cg_list, &newcg->tasks);
1496 write_unlock(&css_set_lock);
1497
1498 for_each_subsys(root, ss) {
1499 @@ -3630,12 +3628,12 @@ again:
1500 spin_lock(&release_list_lock);
1501 set_bit(CGRP_REMOVED, &cgrp->flags);
1502 if (!list_empty(&cgrp->release_list))
1503 - list_del(&cgrp->release_list);
1504 + list_del_init(&cgrp->release_list);
1505 spin_unlock(&release_list_lock);
1506
1507 cgroup_lock_hierarchy(cgrp->root);
1508 /* delete this cgroup from parent->children */
1509 - list_del(&cgrp->sibling);
1510 + list_del_init(&cgrp->sibling);
1511 cgroup_unlock_hierarchy(cgrp->root);
1512
1513 spin_lock(&cgrp->dentry->d_lock);
1514 @@ -3856,7 +3854,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
1515 subsys[ss->subsys_id] = NULL;
1516
1517 /* remove subsystem from rootnode's list of subsystems */
1518 - list_del(&ss->sibling);
1519 + list_del_init(&ss->sibling);
1520
1521 /*
1522 * disentangle the css from all css_sets attached to the dummytop. as
1523 @@ -4230,7 +4228,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
1524 if (!list_empty(&tsk->cg_list)) {
1525 write_lock(&css_set_lock);
1526 if (!list_empty(&tsk->cg_list))
1527 - list_del(&tsk->cg_list);
1528 + list_del_init(&tsk->cg_list);
1529 write_unlock(&css_set_lock);
1530 }
1531
1532 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
1533 index ee489d0..a176dfb 100644
1534 --- a/kernel/perf_event.c
1535 +++ b/kernel/perf_event.c
1536 @@ -5863,17 +5863,20 @@ __perf_event_exit_task(struct perf_event *child_event,
1537 struct perf_event_context *child_ctx,
1538 struct task_struct *child)
1539 {
1540 - struct perf_event *parent_event;
1541 + if (child_event->parent) {
1542 + raw_spin_lock_irq(&child_ctx->lock);
1543 + perf_group_detach(child_event);
1544 + raw_spin_unlock_irq(&child_ctx->lock);
1545 + }
1546
1547 perf_event_remove_from_context(child_event);
1548
1549 - parent_event = child_event->parent;
1550 /*
1551 - * It can happen that parent exits first, and has events
1552 + * It can happen that the parent exits first, and has events
1553 * that are still around due to the child reference. These
1554 - * events need to be zapped - but otherwise linger.
1555 + * events need to be zapped.
1556 */
1557 - if (parent_event) {
1558 + if (child_event->parent) {
1559 sync_child_event(child_event, child);
1560 free_event(child_event);
1561 }
1562 diff --git a/kernel/signal.c b/kernel/signal.c
1563 index 4e3cff1..3175186 100644
1564 --- a/kernel/signal.c
1565 +++ b/kernel/signal.c
1566 @@ -2421,9 +2421,13 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
1567 return -EFAULT;
1568
1569 /* Not even root can pretend to send signals from the kernel.
1570 - Nor can they impersonate a kill(), which adds source info. */
1571 - if (info.si_code >= 0)
1572 + * Nor can they impersonate a kill()/tgkill(), which adds source info.
1573 + */
1574 + if (info.si_code != SI_QUEUE) {
1575 + /* We used to allow any < 0 si_code */
1576 + WARN_ON_ONCE(info.si_code < 0);
1577 return -EPERM;
1578 + }
1579 info.si_signo = sig;
1580
1581 /* POSIX.1b doesn't mention process groups. */
1582 @@ -2437,9 +2441,13 @@ long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
1583 return -EINVAL;
1584
1585 /* Not even root can pretend to send signals from the kernel.
1586 - Nor can they impersonate a kill(), which adds source info. */
1587 - if (info->si_code >= 0)
1588 + * Nor can they impersonate a kill()/tgkill(), which adds source info.
1589 + */
1590 + if (info->si_code != SI_QUEUE) {
1591 + /* We used to allow any < 0 si_code */
1592 + WARN_ON_ONCE(info->si_code < 0);
1593 return -EPERM;
1594 + }
1595 info->si_signo = sig;
1596
1597 return do_send_specific(tgid, pid, sig, info);
1598 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
1599 index 4e17828..b44b1b0 100644
1600 --- a/kernel/sysctl.c
1601 +++ b/kernel/sysctl.c
1602 @@ -168,6 +168,11 @@ static int proc_taint(struct ctl_table *table, int write,
1603 void __user *buffer, size_t *lenp, loff_t *ppos);
1604 #endif
1605
1606 +#ifdef CONFIG_PRINTK
1607 +static int proc_dmesg_restrict(struct ctl_table *table, int write,
1608 + void __user *buffer, size_t *lenp, loff_t *ppos);
1609 +#endif
1610 +
1611 #ifdef CONFIG_MAGIC_SYSRQ
1612 /* Note: sysrq code uses it's own private copy */
1613 static int __sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
1614 @@ -1469,7 +1474,7 @@ static struct ctl_table fs_table[] = {
1615 .data = &suid_dumpable,
1616 .maxlen = sizeof(int),
1617 .mode = 0644,
1618 - .proc_handler = proc_dointvec_minmax,
1619 + .proc_handler = proc_dmesg_restrict,
1620 .extra1 = &zero,
1621 .extra2 = &two,
1622 },
1623 @@ -2400,6 +2405,17 @@ static int proc_taint(struct ctl_table *table, int write,
1624 return err;
1625 }
1626
1627 +#ifdef CONFIG_PRINTK
1628 +static int proc_dmesg_restrict(struct ctl_table *table, int write,
1629 + void __user *buffer, size_t *lenp, loff_t *ppos)
1630 +{
1631 + if (write && !capable(CAP_SYS_ADMIN))
1632 + return -EPERM;
1633 +
1634 + return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1635 +}
1636 +#endif
1637 +
1638 struct do_proc_dointvec_minmax_conv_param {
1639 int *min;
1640 int *max;
1641 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
1642 index 027100d..8e4ed88 100644
1643 --- a/mm/backing-dev.c
1644 +++ b/mm/backing-dev.c
1645 @@ -604,7 +604,7 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
1646 spin_lock(&sb_lock);
1647 list_for_each_entry(sb, &super_blocks, s_list) {
1648 if (sb->s_bdi == bdi)
1649 - sb->s_bdi = NULL;
1650 + sb->s_bdi = &default_backing_dev_info;
1651 }
1652 spin_unlock(&sb_lock);
1653 }
1654 diff --git a/mm/oom_kill.c b/mm/oom_kill.c
1655 index 7dcca55..33b5861 100644
1656 --- a/mm/oom_kill.c
1657 +++ b/mm/oom_kill.c
1658 @@ -31,6 +31,7 @@
1659 #include <linux/memcontrol.h>
1660 #include <linux/mempolicy.h>
1661 #include <linux/security.h>
1662 +#include <linux/ptrace.h>
1663
1664 int sysctl_panic_on_oom;
1665 int sysctl_oom_kill_allocating_task;
1666 @@ -292,13 +293,15 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
1667 unsigned long totalpages, struct mem_cgroup *mem,
1668 const nodemask_t *nodemask)
1669 {
1670 - struct task_struct *p;
1671 + struct task_struct *g, *p;
1672 struct task_struct *chosen = NULL;
1673 *ppoints = 0;
1674
1675 - for_each_process(p) {
1676 + do_each_thread(g, p) {
1677 unsigned int points;
1678
1679 + if (!p->mm)
1680 + continue;
1681 if (oom_unkillable_task(p, mem, nodemask))
1682 continue;
1683
1684 @@ -314,22 +317,29 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
1685 if (test_tsk_thread_flag(p, TIF_MEMDIE))
1686 return ERR_PTR(-1UL);
1687
1688 - /*
1689 - * This is in the process of releasing memory so wait for it
1690 - * to finish before killing some other task by mistake.
1691 - *
1692 - * However, if p is the current task, we allow the 'kill' to
1693 - * go ahead if it is exiting: this will simply set TIF_MEMDIE,
1694 - * which will allow it to gain access to memory reserves in
1695 - * the process of exiting and releasing its resources.
1696 - * Otherwise we could get an easy OOM deadlock.
1697 - */
1698 - if (thread_group_empty(p) && (p->flags & PF_EXITING) && p->mm) {
1699 - if (p != current)
1700 - return ERR_PTR(-1UL);
1701 -
1702 - chosen = p;
1703 - *ppoints = 1000;
1704 + if (p->flags & PF_EXITING) {
1705 + /*
1706 + * If p is the current task and is in the process of
1707 + * releasing memory, we allow the "kill" to set
1708 + * TIF_MEMDIE, which will allow it to gain access to
1709 + * memory reserves. Otherwise, it may stall forever.
1710 + *
1711 + * The loop isn't broken here, however, in case other
1712 + * threads are found to have already been oom killed.
1713 + */
1714 + if (p == current) {
1715 + chosen = p;
1716 + *ppoints = 1000;
1717 + } else {
1718 + /*
1719 + * If this task is not being ptraced on exit,
1720 + * then wait for it to finish before killing
1721 + * some other task unnecessarily.
1722 + */
1723 + if (!(task_ptrace(p->group_leader) &
1724 + PT_TRACE_EXIT))
1725 + return ERR_PTR(-1UL);
1726 + }
1727 }
1728
1729 points = oom_badness(p, mem, nodemask, totalpages);
1730 @@ -337,7 +347,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
1731 chosen = p;
1732 *ppoints = points;
1733 }
1734 - }
1735 + } while_each_thread(g, p);
1736
1737 return chosen;
1738 }
1739 @@ -491,6 +501,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
1740 list_for_each_entry(child, &t->children, sibling) {
1741 unsigned int child_points;
1742
1743 + if (child->mm == p->mm)
1744 + continue;
1745 /*
1746 * oom_badness() returns 0 if the thread is unkillable
1747 */
1748 diff --git a/mm/shmem.c b/mm/shmem.c
1749 index 47fdeeb..76e971c 100644
1750 --- a/mm/shmem.c
1751 +++ b/mm/shmem.c
1752 @@ -2784,5 +2784,6 @@ int shmem_zero_setup(struct vm_area_struct *vma)
1753 fput(vma->vm_file);
1754 vma->vm_file = file;
1755 vma->vm_ops = &shmem_vm_ops;
1756 + vma->vm_flags |= VM_CAN_NONLINEAR;
1757 return 0;
1758 }
1759 diff --git a/mm/slab.c b/mm/slab.c
1760 index b1e40da..e548c91 100644
1761 --- a/mm/slab.c
1762 +++ b/mm/slab.c
1763 @@ -2288,8 +2288,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1764 if (ralign < align) {
1765 ralign = align;
1766 }
1767 - /* disable debug if not aligning with REDZONE_ALIGN */
1768 - if (ralign & (__alignof__(unsigned long long) - 1))
1769 + /* disable debug if necessary */
1770 + if (ralign > __alignof__(unsigned long long))
1771 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
1772 /*
1773 * 4) Store it.
1774 @@ -2315,8 +2315,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1775 */
1776 if (flags & SLAB_RED_ZONE) {
1777 /* add space for red zone words */
1778 - cachep->obj_offset += align;
1779 - size += align + sizeof(unsigned long long);
1780 + cachep->obj_offset += sizeof(unsigned long long);
1781 + size += 2 * sizeof(unsigned long long);
1782 }
1783 if (flags & SLAB_STORE_USER) {
1784 /* user store requires one word storage behind the end of
1785 diff --git a/mm/swapfile.c b/mm/swapfile.c
1786 index 67ddaaf..5cd34e6 100644
1787 --- a/mm/swapfile.c
1788 +++ b/mm/swapfile.c
1789 @@ -2146,8 +2146,13 @@ bad_swap_2:
1790 p->flags = 0;
1791 spin_unlock(&swap_lock);
1792 vfree(swap_map);
1793 - if (swap_file)
1794 + if (swap_file) {
1795 + if (did_down) {
1796 + mutex_unlock(&inode->i_mutex);
1797 + did_down = 0;
1798 + }
1799 filp_close(swap_file, NULL);
1800 + }
1801 out:
1802 if (page && !IS_ERR(page)) {
1803 kunmap(page);
1804 diff --git a/net/socket.c b/net/socket.c
1805 index 088fb3f..01ca953 100644
1806 --- a/net/socket.c
1807 +++ b/net/socket.c
1808 @@ -2566,23 +2566,123 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
1809
1810 static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
1811 {
1812 + struct compat_ethtool_rxnfc __user *compat_rxnfc;
1813 + bool convert_in = false, convert_out = false;
1814 + size_t buf_size = ALIGN(sizeof(struct ifreq), 8);
1815 + struct ethtool_rxnfc __user *rxnfc;
1816 struct ifreq __user *ifr;
1817 + u32 rule_cnt = 0, actual_rule_cnt;
1818 + u32 ethcmd;
1819 u32 data;
1820 - void __user *datap;
1821 + int ret;
1822 +
1823 + if (get_user(data, &ifr32->ifr_ifru.ifru_data))
1824 + return -EFAULT;
1825
1826 - ifr = compat_alloc_user_space(sizeof(*ifr));
1827 + compat_rxnfc = compat_ptr(data);
1828
1829 - if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
1830 + if (get_user(ethcmd, &compat_rxnfc->cmd))
1831 return -EFAULT;
1832
1833 - if (get_user(data, &ifr32->ifr_ifru.ifru_data))
1834 + /* Most ethtool structures are defined without padding.
1835 + * Unfortunately struct ethtool_rxnfc is an exception.
1836 + */
1837 + switch (ethcmd) {
1838 + default:
1839 + break;
1840 + case ETHTOOL_GRXCLSRLALL:
1841 + /* Buffer size is variable */
1842 + if (get_user(rule_cnt, &compat_rxnfc->rule_cnt))
1843 + return -EFAULT;
1844 + if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32))
1845 + return -ENOMEM;
1846 + buf_size += rule_cnt * sizeof(u32);
1847 + /* fall through */
1848 + case ETHTOOL_GRXRINGS:
1849 + case ETHTOOL_GRXCLSRLCNT:
1850 + case ETHTOOL_GRXCLSRULE:
1851 + convert_out = true;
1852 + /* fall through */
1853 + case ETHTOOL_SRXCLSRLDEL:
1854 + case ETHTOOL_SRXCLSRLINS:
1855 + buf_size += sizeof(struct ethtool_rxnfc);
1856 + convert_in = true;
1857 + break;
1858 + }
1859 +
1860 + ifr = compat_alloc_user_space(buf_size);
1861 + rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
1862 +
1863 + if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
1864 return -EFAULT;
1865
1866 - datap = compat_ptr(data);
1867 - if (put_user(datap, &ifr->ifr_ifru.ifru_data))
1868 + if (put_user(convert_in ? rxnfc : compat_ptr(data),
1869 + &ifr->ifr_ifru.ifru_data))
1870 return -EFAULT;
1871
1872 - return dev_ioctl(net, SIOCETHTOOL, ifr);
1873 + if (convert_in) {
1874 + /* We expect there to be holes between fs.m_u and
1875 + * fs.ring_cookie and at the end of fs, but nowhere else.
1876 + */
1877 + BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_u) +
1878 + sizeof(compat_rxnfc->fs.m_u) !=
1879 + offsetof(struct ethtool_rxnfc, fs.m_u) +
1880 + sizeof(rxnfc->fs.m_u));
1881 + BUILD_BUG_ON(
1882 + offsetof(struct compat_ethtool_rxnfc, fs.location) -
1883 + offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) !=
1884 + offsetof(struct ethtool_rxnfc, fs.location) -
1885 + offsetof(struct ethtool_rxnfc, fs.ring_cookie));
1886 +
1887 + if (copy_in_user(rxnfc, compat_rxnfc,
1888 + (void *)(&rxnfc->fs.m_u + 1) -
1889 + (void *)rxnfc) ||
1890 + copy_in_user(&rxnfc->fs.ring_cookie,
1891 + &compat_rxnfc->fs.ring_cookie,
1892 + (void *)(&rxnfc->fs.location + 1) -
1893 + (void *)&rxnfc->fs.ring_cookie) ||
1894 + copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
1895 + sizeof(rxnfc->rule_cnt)))
1896 + return -EFAULT;
1897 + }
1898 +
1899 + ret = dev_ioctl(net, SIOCETHTOOL, ifr);
1900 + if (ret)
1901 + return ret;
1902 +
1903 + if (convert_out) {
1904 + if (copy_in_user(compat_rxnfc, rxnfc,
1905 + (const void *)(&rxnfc->fs.m_u + 1) -
1906 + (const void *)rxnfc) ||
1907 + copy_in_user(&compat_rxnfc->fs.ring_cookie,
1908 + &rxnfc->fs.ring_cookie,
1909 + (const void *)(&rxnfc->fs.location + 1) -
1910 + (const void *)&rxnfc->fs.ring_cookie) ||
1911 + copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
1912 + sizeof(rxnfc->rule_cnt)))
1913 + return -EFAULT;
1914 +
1915 + if (ethcmd == ETHTOOL_GRXCLSRLALL) {
1916 + /* As an optimisation, we only copy the actual
1917 + * number of rules that the underlying
1918 + * function returned. Since Mallory might
1919 + * change the rule count in user memory, we
1920 + * check that it is less than the rule count
1921 + * originally given (as the user buffer size),
1922 + * which has been range-checked.
1923 + */
1924 + if (get_user(actual_rule_cnt, &rxnfc->rule_cnt))
1925 + return -EFAULT;
1926 + if (actual_rule_cnt < rule_cnt)
1927 + rule_cnt = actual_rule_cnt;
1928 + if (copy_in_user(&compat_rxnfc->rule_locs[0],
1929 + &rxnfc->rule_locs[0],
1930 + rule_cnt * sizeof(u32)))
1931 + return -EFAULT;
1932 + }
1933 + }
1934 +
1935 + return 0;
1936 }
1937
1938 static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32)
1939 diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
1940 index 3ad452b..2e88619 100644
1941 --- a/net/sunrpc/xprtsock.c
1942 +++ b/net/sunrpc/xprtsock.c
1943 @@ -710,6 +710,8 @@ static void xs_reset_transport(struct sock_xprt *transport)
1944 if (sk == NULL)
1945 return;
1946
1947 + transport->srcport = 0;
1948 +
1949 write_lock_bh(&sk->sk_callback_lock);
1950 transport->inet = NULL;
1951 transport->sock = NULL;
1952 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
1953 index 797a16c..228d6fb 100644
1954 --- a/sound/pci/hda/patch_sigmatel.c
1955 +++ b/sound/pci/hda/patch_sigmatel.c
1956 @@ -94,6 +94,7 @@ enum {
1957 STAC_92HD83XXX_REF,
1958 STAC_92HD83XXX_PWR_REF,
1959 STAC_DELL_S14,
1960 + STAC_DELL_E5520M,
1961 STAC_92HD83XXX_HP,
1962 STAC_HP_DV7_4000,
1963 STAC_92HD83XXX_MODELS
1964 @@ -1649,6 +1650,13 @@ static unsigned int dell_s14_pin_configs[10] = {
1965 0x40f000f0, 0x40f000f0,
1966 };
1967
1968 +/* Switch int mic from 0x20 to 0x11 */
1969 +static unsigned int dell_e5520m_pin_configs[10] = {
1970 + 0x04a11020, 0x0421101f, 0x400000f0, 0x90170110,
1971 + 0x23011050, 0x23a1102e, 0x400000f3, 0xd5a30130,
1972 + 0x400000f0, 0x40f000f0,
1973 +};
1974 +
1975 static unsigned int hp_dv7_4000_pin_configs[10] = {
1976 0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110,
1977 0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140,
1978 @@ -1659,6 +1667,7 @@ static unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = {
1979 [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs,
1980 [STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs,
1981 [STAC_DELL_S14] = dell_s14_pin_configs,
1982 + [STAC_DELL_E5520M] = dell_e5520m_pin_configs,
1983 [STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs,
1984 };
1985
1986 @@ -1667,6 +1676,7 @@ static const char *stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
1987 [STAC_92HD83XXX_REF] = "ref",
1988 [STAC_92HD83XXX_PWR_REF] = "mic-ref",
1989 [STAC_DELL_S14] = "dell-s14",
1990 + [STAC_DELL_E5520M] = "dell-e5520m",
1991 [STAC_92HD83XXX_HP] = "hp",
1992 [STAC_HP_DV7_4000] = "hp-dv7-4000",
1993 };
1994 @@ -1679,6 +1689,14 @@ static struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
1995 "DFI LanParty", STAC_92HD83XXX_REF),
1996 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba,
1997 "unknown Dell", STAC_DELL_S14),
1998 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x049a,
1999 + "Dell E5520", STAC_DELL_E5520M),
2000 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x049b,
2001 + "Dell E5420", STAC_DELL_E5520M),
2002 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x04eb,
2003 + "Dell E5420m", STAC_DELL_E5520M),
2004 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x04ec,
2005 + "Dell E5520m", STAC_DELL_E5520M),
2006 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600,
2007 "HP", STAC_92HD83XXX_HP),
2008 {} /* terminator */
2009 diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
2010 index e1b775d..d45ba2d 100644
2011 --- a/sound/pci/hda/patch_via.c
2012 +++ b/sound/pci/hda/patch_via.c
2013 @@ -159,6 +159,7 @@ struct via_spec {
2014 #endif
2015 };
2016
2017 +static enum VIA_HDA_CODEC get_codec_type(struct hda_codec *codec);
2018 static struct via_spec * via_new_spec(struct hda_codec *codec)
2019 {
2020 struct via_spec *spec;
2021 @@ -169,6 +170,10 @@ static struct via_spec * via_new_spec(struct hda_codec *codec)
2022
2023 codec->spec = spec;
2024 spec->codec = codec;
2025 + spec->codec_type = get_codec_type(codec);
2026 + /* VT1708BCE & VT1708S are almost same */
2027 + if (spec->codec_type == VT1708BCE)
2028 + spec->codec_type = VT1708S;
2029 return spec;
2030 }
2031
2032 @@ -1102,6 +1107,7 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol,
2033 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
2034 struct via_spec *spec = codec->spec;
2035 unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2036 + int ret;
2037
2038 if (!spec->mux_nids[adc_idx])
2039 return -EINVAL;
2040 @@ -1110,12 +1116,14 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol,
2041 AC_VERB_GET_POWER_STATE, 0x00) != AC_PWRST_D0)
2042 snd_hda_codec_write(codec, spec->mux_nids[adc_idx], 0,
2043 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
2044 - /* update jack power state */
2045 - set_jack_power_state(codec);
2046
2047 - return snd_hda_input_mux_put(codec, spec->input_mux, ucontrol,
2048 + ret = snd_hda_input_mux_put(codec, spec->input_mux, ucontrol,
2049 spec->mux_nids[adc_idx],
2050 &spec->cur_mux[adc_idx]);
2051 + /* update jack power state */
2052 + set_jack_power_state(codec);
2053 +
2054 + return ret;
2055 }
2056
2057 static int via_independent_hp_info(struct snd_kcontrol *kcontrol,
2058 @@ -1189,8 +1197,16 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol,
2059 /* Get Independent Mode index of headphone pin widget */
2060 spec->hp_independent_mode = spec->hp_independent_mode_index == pinsel
2061 ? 1 : 0;
2062 - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, pinsel);
2063 + if (spec->codec_type == VT1718S)
2064 + snd_hda_codec_write(codec, nid, 0,
2065 + AC_VERB_SET_CONNECT_SEL, pinsel ? 2 : 0);
2066 + else
2067 + snd_hda_codec_write(codec, nid, 0,
2068 + AC_VERB_SET_CONNECT_SEL, pinsel);
2069
2070 + if (spec->codec_type == VT1812)
2071 + snd_hda_codec_write(codec, 0x35, 0,
2072 + AC_VERB_SET_CONNECT_SEL, pinsel);
2073 if (spec->multiout.hp_nid && spec->multiout.hp_nid
2074 != spec->multiout.dac_nids[HDA_FRONT])
2075 snd_hda_codec_setup_stream(codec, spec->multiout.hp_nid,
2076 @@ -1209,6 +1225,8 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol,
2077 activate_ctl(codec, "Headphone Playback Switch",
2078 spec->hp_independent_mode);
2079 }
2080 + /* update jack power state */
2081 + set_jack_power_state(codec);
2082 return 0;
2083 }
2084
2085 @@ -1249,9 +1267,12 @@ static int via_hp_build(struct hda_codec *codec)
2086 break;
2087 }
2088
2089 - nums = snd_hda_get_connections(codec, nid, conn, HDA_MAX_CONNECTIONS);
2090 - if (nums <= 1)
2091 - return 0;
2092 + if (spec->codec_type != VT1708) {
2093 + nums = snd_hda_get_connections(codec, nid,
2094 + conn, HDA_MAX_CONNECTIONS);
2095 + if (nums <= 1)
2096 + return 0;
2097 + }
2098
2099 knew = via_clone_control(spec, &via_hp_mixer[0]);
2100 if (knew == NULL)
2101 @@ -1311,6 +1332,11 @@ static void mute_aa_path(struct hda_codec *codec, int mute)
2102 start_idx = 2;
2103 end_idx = 4;
2104 break;
2105 + case VT1718S:
2106 + nid_mixer = 0x21;
2107 + start_idx = 1;
2108 + end_idx = 3;
2109 + break;
2110 default:
2111 return;
2112 }
2113 @@ -2186,10 +2212,6 @@ static int via_init(struct hda_codec *codec)
2114 for (i = 0; i < spec->num_iverbs; i++)
2115 snd_hda_sequence_write(codec, spec->init_verbs[i]);
2116
2117 - spec->codec_type = get_codec_type(codec);
2118 - if (spec->codec_type == VT1708BCE)
2119 - spec->codec_type = VT1708S; /* VT1708BCE & VT1708S are almost
2120 - same */
2121 /* Lydia Add for EAPD enable */
2122 if (!spec->dig_in_nid) { /* No Digital In connection */
2123 if (spec->dig_in_pin) {
2124 @@ -2437,7 +2459,14 @@ static int vt_auto_create_analog_input_ctls(struct hda_codec *codec,
2125 else
2126 type_idx = 0;
2127 label = hda_get_autocfg_input_label(codec, cfg, i);
2128 - err = via_new_analog_input(spec, label, type_idx, idx, cap_nid);
2129 + if (spec->codec_type == VT1708S ||
2130 + spec->codec_type == VT1702 ||
2131 + spec->codec_type == VT1716S)
2132 + err = via_new_analog_input(spec, label, type_idx,
2133 + idx+1, cap_nid);
2134 + else
2135 + err = via_new_analog_input(spec, label, type_idx,
2136 + idx, cap_nid);
2137 if (err < 0)
2138 return err;
2139 snd_hda_add_imux_item(imux, label, idx, NULL);

  ViewVC Help
Powered by ViewVC 1.1.20