/[linux-patches]/genpatches-2.6/trunk/3.4/1014_linux-3.4.15.patch
Gentoo

Contents of /genpatches-2.6/trunk/3.4/1014_linux-3.4.15.patch

Parent Directory | Revision Log


Revision 2254 - (show annotations) (download)
Wed Dec 19 19:51:16 2012 UTC (21 months ago) by mpagano
File size: 81271 byte(s)
Linux patches 3.4.12 through and including 3.4.24
1 diff --git a/Makefile b/Makefile
2 index d174c84..fe9ea67 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 4
8 -SUBLEVEL = 14
9 +SUBLEVEL = 15
10 EXTRAVERSION =
11 NAME = Saber-toothed Squirrel
12
13 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
14 index 352322a..e14ae11 100644
15 --- a/arch/arm/Kconfig
16 +++ b/arch/arm/Kconfig
17 @@ -1405,6 +1405,16 @@ config PL310_ERRATA_769419
18 on systems with an outer cache, the store buffer is drained
19 explicitly.
20
21 +config ARM_ERRATA_775420
22 + bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
23 + depends on CPU_V7
24 + help
25 + This option enables the workaround for the 775420 Cortex-A9 (r2p2,
26 + r2p6,r2p8,r2p10,r3p0) erratum. In case a date cache maintenance
27 + operation aborts with MMU exception, it might cause the processor
28 + to deadlock. This workaround puts DSB before executing ISB if
29 + an abort may occur on cache maintenance.
30 +
31 endmenu
32
33 source "arch/arm/common/Kconfig"
34 diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
35 index 3d5fc41..bf53047 100644
36 --- a/arch/arm/include/asm/vfpmacros.h
37 +++ b/arch/arm/include/asm/vfpmacros.h
38 @@ -28,7 +28,7 @@
39 ldr \tmp, =elf_hwcap @ may not have MVFR regs
40 ldr \tmp, [\tmp, #0]
41 tst \tmp, #HWCAP_VFPv3D16
42 - ldceq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
43 + ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
44 addne \base, \base, #32*4 @ step over unused register space
45 #else
46 VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
47 @@ -52,7 +52,7 @@
48 ldr \tmp, =elf_hwcap @ may not have MVFR regs
49 ldr \tmp, [\tmp, #0]
50 tst \tmp, #HWCAP_VFPv3D16
51 - stceq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
52 + stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
53 addne \base, \base, #32*4 @ step over unused register space
54 #else
55 VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
56 diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
57 index a655d3d..82ab2c5 100644
58 --- a/arch/arm/mm/cache-v7.S
59 +++ b/arch/arm/mm/cache-v7.S
60 @@ -211,6 +211,9 @@ ENTRY(v7_coherent_user_range)
61 * isn't mapped, just try the next page.
62 */
63 9001:
64 +#ifdef CONFIG_ARM_ERRATA_775420
65 + dsb
66 +#endif
67 mov r12, r12, lsr #12
68 mov r12, r12, lsl #12
69 add r12, r12, #4096
70 diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
71 index f4546e9..23817a6 100644
72 --- a/arch/mips/kernel/kgdb.c
73 +++ b/arch/mips/kernel/kgdb.c
74 @@ -283,6 +283,15 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
75 struct pt_regs *regs = args->regs;
76 int trap = (regs->cp0_cause & 0x7c) >> 2;
77
78 +#ifdef CONFIG_KPROBES
79 + /*
80 + * Return immediately if the kprobes fault notifier has set
81 + * DIE_PAGE_FAULT.
82 + */
83 + if (cmd == DIE_PAGE_FAULT)
84 + return NOTIFY_DONE;
85 +#endif /* CONFIG_KPROBES */
86 +
87 /* Userspace events, ignore. */
88 if (user_mode(regs))
89 return NOTIFY_DONE;
90 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
91 index 40edfc3..5910054 100644
92 --- a/arch/x86/xen/enlighten.c
93 +++ b/arch/x86/xen/enlighten.c
94 @@ -942,7 +942,16 @@ static void xen_write_cr4(unsigned long cr4)
95
96 native_write_cr4(cr4);
97 }
98 -
99 +#ifdef CONFIG_X86_64
100 +static inline unsigned long xen_read_cr8(void)
101 +{
102 + return 0;
103 +}
104 +static inline void xen_write_cr8(unsigned long val)
105 +{
106 + BUG_ON(val);
107 +}
108 +#endif
109 static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
110 {
111 int ret;
112 @@ -1111,6 +1120,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
113 .read_cr4_safe = native_read_cr4_safe,
114 .write_cr4 = xen_write_cr4,
115
116 +#ifdef CONFIG_X86_64
117 + .read_cr8 = xen_read_cr8,
118 + .write_cr8 = xen_write_cr8,
119 +#endif
120 +
121 .wbinvd = native_wbinvd,
122
123 .read_msr = native_read_msr_safe,
124 @@ -1121,6 +1135,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
125 .read_tsc = native_read_tsc,
126 .read_pmc = native_read_pmc,
127
128 + .read_tscp = native_read_tscp,
129 +
130 .iret = xen_iret,
131 .irq_enable_sysexit = xen_sysexit,
132 #ifdef CONFIG_X86_64
133 diff --git a/block/blk-core.c b/block/blk-core.c
134 index 1f61b74..85fd410 100644
135 --- a/block/blk-core.c
136 +++ b/block/blk-core.c
137 @@ -601,7 +601,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
138 q->request_fn = rfn;
139 q->prep_rq_fn = NULL;
140 q->unprep_rq_fn = NULL;
141 - q->queue_flags = QUEUE_FLAG_DEFAULT;
142 + q->queue_flags |= QUEUE_FLAG_DEFAULT;
143
144 /* Override internal queue lock with supplied lock pointer */
145 if (lock)
146 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
147 index 7edaccc..a51df96 100644
148 --- a/drivers/acpi/ec.c
149 +++ b/drivers/acpi/ec.c
150 @@ -71,9 +71,6 @@ enum ec_command {
151 #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
152 #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
153
154 -#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts
155 - per one transaction */
156 -
157 enum {
158 EC_FLAGS_QUERY_PENDING, /* Query is pending */
159 EC_FLAGS_GPE_STORM, /* GPE storm detected */
160 @@ -87,6 +84,15 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
161 module_param(ec_delay, uint, 0644);
162 MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
163
164 +/*
165 + * If the number of false interrupts per one transaction exceeds
166 + * this threshold, will think there is a GPE storm happened and
167 + * will disable the GPE for normal transaction.
168 + */
169 +static unsigned int ec_storm_threshold __read_mostly = 8;
170 +module_param(ec_storm_threshold, uint, 0644);
171 +MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm");
172 +
173 /* If we find an EC via the ECDT, we need to keep a ptr to its context */
174 /* External interfaces use first EC only, so remember */
175 typedef int (*acpi_ec_query_func) (void *data);
176 @@ -319,7 +325,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
177 msleep(1);
178 /* It is safe to enable the GPE outside of the transaction. */
179 acpi_enable_gpe(NULL, ec->gpe);
180 - } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
181 + } else if (t->irq_count > ec_storm_threshold) {
182 pr_info(PREFIX "GPE storm detected, "
183 "transactions will use polling mode\n");
184 set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
185 @@ -924,6 +930,17 @@ static int ec_flag_msi(const struct dmi_system_id *id)
186 return 0;
187 }
188
189 +/*
190 + * Clevo M720 notebook actually works ok with IRQ mode, if we lifted
191 + * the GPE storm threshold back to 20
192 + */
193 +static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
194 +{
195 + pr_debug("Setting the EC GPE storm threshold to 20\n");
196 + ec_storm_threshold = 20;
197 + return 0;
198 +}
199 +
200 static struct dmi_system_id __initdata ec_dmi_table[] = {
201 {
202 ec_skip_dsdt_scan, "Compal JFL92", {
203 @@ -955,10 +972,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
204 {
205 ec_validate_ecdt, "ASUS hardware", {
206 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
207 + {
208 + ec_enlarge_storm_threshold, "CLEVO hardware", {
209 + DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
210 + DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
211 {},
212 };
213
214 -
215 int __init acpi_ec_ecdt_probe(void)
216 {
217 acpi_status status;
218 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
219 index 08427ab..27f8ddf 100644
220 --- a/drivers/char/tpm/tpm.c
221 +++ b/drivers/char/tpm/tpm.c
222 @@ -1186,17 +1186,20 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
223 size_t size, loff_t *off)
224 {
225 struct tpm_chip *chip = file->private_data;
226 - size_t in_size = size, out_size;
227 + size_t in_size = size;
228 + ssize_t out_size;
229
230 /* cannot perform a write until the read has cleared
231 - either via tpm_read or a user_read_timer timeout */
232 - while (atomic_read(&chip->data_pending) != 0)
233 - msleep(TPM_TIMEOUT);
234 -
235 - mutex_lock(&chip->buffer_mutex);
236 + either via tpm_read or a user_read_timer timeout.
237 + This also prevents splitted buffered writes from blocking here.
238 + */
239 + if (atomic_read(&chip->data_pending) != 0)
240 + return -EBUSY;
241
242 if (in_size > TPM_BUFSIZE)
243 - in_size = TPM_BUFSIZE;
244 + return -E2BIG;
245 +
246 + mutex_lock(&chip->buffer_mutex);
247
248 if (copy_from_user
249 (chip->data_buffer, (void __user *) buf, in_size)) {
250 @@ -1206,6 +1209,10 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
251
252 /* atomic tpm command send and result receive */
253 out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
254 + if (out_size < 0) {
255 + mutex_unlock(&chip->buffer_mutex);
256 + return out_size;
257 + }
258
259 atomic_set(&chip->data_pending, out_size);
260 mutex_unlock(&chip->buffer_mutex);
261 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
262 index 2e6b245..b8e4809 100644
263 --- a/drivers/firewire/core-cdev.c
264 +++ b/drivers/firewire/core-cdev.c
265 @@ -471,8 +471,8 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
266 client->bus_reset_closure = a->bus_reset_closure;
267 if (a->bus_reset != 0) {
268 fill_bus_reset_event(&bus_reset, client);
269 - ret = copy_to_user(u64_to_uptr(a->bus_reset),
270 - &bus_reset, sizeof(bus_reset));
271 + /* unaligned size of bus_reset is 36 bytes */
272 + ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
273 }
274 if (ret == 0 && list_empty(&client->link))
275 list_add_tail(&client->link, &client->device->client_list);
276 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
277 index 498bcbe..f3b06f0 100644
278 --- a/drivers/gpu/drm/i915/intel_display.c
279 +++ b/drivers/gpu/drm/i915/intel_display.c
280 @@ -5318,7 +5318,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
281 /* default to 8bpc */
282 pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
283 if (is_dp) {
284 - if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
285 + if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
286 pipeconf |= PIPECONF_BPP_6 |
287 PIPECONF_DITHER_EN |
288 PIPECONF_DITHER_TYPE_SP;
289 @@ -5782,7 +5782,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
290 /* determine panel color depth */
291 temp = I915_READ(PIPECONF(pipe));
292 temp &= ~PIPE_BPC_MASK;
293 - dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
294 + dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, adjusted_mode);
295 switch (pipe_bpp) {
296 case 18:
297 temp |= PIPE_6BPC;
298 diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
299 index 42db254..1461e2c 100644
300 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
301 +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
302 @@ -973,11 +973,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
303 static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
304 {
305 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
306 - struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
307 - if (tmds) {
308 - if (tmds->i2c_bus)
309 - radeon_i2c_destroy(tmds->i2c_bus);
310 - }
311 + /* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
312 kfree(radeon_encoder->enc_priv);
313 drm_encoder_cleanup(encoder);
314 kfree(radeon_encoder);
315 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
316 index 1f7e8cd..4a038cd 100644
317 --- a/drivers/md/raid10.c
318 +++ b/drivers/md/raid10.c
319 @@ -3019,7 +3019,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
320 else {
321 bad_sectors -= (sector - first_bad);
322 if (max_sync > bad_sectors)
323 - max_sync = max_sync;
324 + max_sync = bad_sectors;
325 continue;
326 }
327 }
328 diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
329 index b83897f..1ab8067 100644
330 --- a/drivers/net/ethernet/intel/e1000e/e1000.h
331 +++ b/drivers/net/ethernet/intel/e1000e/e1000.h
332 @@ -175,13 +175,13 @@ struct e1000_info;
333 /*
334 * in the case of WTHRESH, it appears at least the 82571/2 hardware
335 * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
336 - * WTHRESH=4, and since we want 64 bytes at a time written back, set
337 - * it to 5
338 + * WTHRESH=4, so a setting of 5 gives the most efficient bus
339 + * utilization but to avoid possible Tx stalls, set it to 1
340 */
341 #define E1000_TXDCTL_DMA_BURST_ENABLE \
342 (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
343 E1000_TXDCTL_COUNT_DESC | \
344 - (5 << 16) | /* wthresh must be +1 more than desired */\
345 + (1 << 16) | /* wthresh must be +1 more than desired */\
346 (1 << 8) | /* hthresh */ \
347 0x1f) /* pthresh */
348
349 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
350 index 5621d5b..7e88aaf 100644
351 --- a/drivers/net/ethernet/intel/e1000e/netdev.c
352 +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
353 @@ -2806,7 +2806,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
354 * set up some performance related parameters to encourage the
355 * hardware to use the bus more efficiently in bursts, depends
356 * on the tx_int_delay to be enabled,
357 - * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
358 + * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
359 * hthresh = 1 ==> prefetch when one or more available
360 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
361 * BEWARE: this seems to work but should be considered first if
362 diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
363 index 6264182..5eb53c9 100644
364 --- a/drivers/net/wireless/ath/ath9k/beacon.c
365 +++ b/drivers/net/wireless/ath/ath9k/beacon.c
366 @@ -121,7 +121,7 @@ static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
367
368 if (ath_tx_start(hw, skb, &txctl) != 0) {
369 ath_dbg(common, XMIT, "CABQ TX failed\n");
370 - dev_kfree_skb_any(skb);
371 + ieee80211_free_txskb(hw, skb);
372 }
373 }
374
375 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
376 index d5dabcb..91e2c4f 100644
377 --- a/drivers/net/wireless/ath/ath9k/main.c
378 +++ b/drivers/net/wireless/ath/ath9k/main.c
379 @@ -1147,7 +1147,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
380
381 return;
382 exit:
383 - dev_kfree_skb_any(skb);
384 + ieee80211_free_txskb(hw, skb);
385 }
386
387 static void ath9k_stop(struct ieee80211_hw *hw)
388 diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
389 index 4d57139..b78773b 100644
390 --- a/drivers/net/wireless/ath/ath9k/xmit.c
391 +++ b/drivers/net/wireless/ath/ath9k/xmit.c
392 @@ -64,8 +64,7 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
393 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
394 struct ath_txq *txq,
395 struct ath_atx_tid *tid,
396 - struct sk_buff *skb,
397 - bool dequeue);
398 + struct sk_buff *skb);
399
400 enum {
401 MCS_HT20,
402 @@ -201,7 +200,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
403 fi = get_frame_info(skb);
404 bf = fi->bf;
405
406 - if (bf && fi->retries) {
407 + if (!bf) {
408 + bf = ath_tx_setup_buffer(sc, txq, tid, skb);
409 + if (!bf) {
410 + ieee80211_free_txskb(sc->hw, skb);
411 + continue;
412 + }
413 + }
414 +
415 + if (fi->retries) {
416 list_add_tail(&bf->list, &bf_head);
417 ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
418 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
419 @@ -812,10 +819,13 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
420 fi = get_frame_info(skb);
421 bf = fi->bf;
422 if (!fi->bf)
423 - bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);
424 + bf = ath_tx_setup_buffer(sc, txq, tid, skb);
425
426 - if (!bf)
427 + if (!bf) {
428 + __skb_unlink(skb, &tid->buf_q);
429 + ieee80211_free_txskb(sc->hw, skb);
430 continue;
431 + }
432
433 bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
434 seqno = bf->bf_state.seqno;
435 @@ -1717,9 +1727,11 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
436 return;
437 }
438
439 - bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
440 - if (!bf)
441 + bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
442 + if (!bf) {
443 + ieee80211_free_txskb(sc->hw, skb);
444 return;
445 + }
446
447 bf->bf_state.bf_type = BUF_AMPDU;
448 INIT_LIST_HEAD(&bf_head);
449 @@ -1743,11 +1755,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
450 struct ath_buf *bf;
451
452 bf = fi->bf;
453 - if (!bf)
454 - bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
455 -
456 - if (!bf)
457 - return;
458
459 INIT_LIST_HEAD(&bf_head);
460 list_add_tail(&bf->list, &bf_head);
461 @@ -1820,8 +1827,7 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
462 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
463 struct ath_txq *txq,
464 struct ath_atx_tid *tid,
465 - struct sk_buff *skb,
466 - bool dequeue)
467 + struct sk_buff *skb)
468 {
469 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
470 struct ath_frame_info *fi = get_frame_info(skb);
471 @@ -1833,7 +1839,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
472 bf = ath_tx_get_buffer(sc);
473 if (!bf) {
474 ath_dbg(common, XMIT, "TX buffers are full\n");
475 - goto error;
476 + return NULL;
477 }
478
479 ATH_TXBUF_RESET(bf);
480 @@ -1862,18 +1868,12 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
481 ath_err(ath9k_hw_common(sc->sc_ah),
482 "dma_mapping_error() on TX\n");
483 ath_tx_return_buffer(sc, bf);
484 - goto error;
485 + return NULL;
486 }
487
488 fi->bf = bf;
489
490 return bf;
491 -
492 -error:
493 - if (dequeue)
494 - __skb_unlink(skb, &tid->buf_q);
495 - dev_kfree_skb_any(skb);
496 - return NULL;
497 }
498
499 /* FIXME: tx power */
500 @@ -1902,9 +1902,14 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
501 */
502 ath_tx_send_ampdu(sc, tid, skb, txctl);
503 } else {
504 - bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
505 - if (!bf)
506 + bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
507 + if (!bf) {
508 + if (txctl->paprd)
509 + dev_kfree_skb_any(skb);
510 + else
511 + ieee80211_free_txskb(sc->hw, skb);
512 return;
513 + }
514
515 bf->bf_state.bfs_paprd = txctl->paprd;
516
517 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
518 index 8a5e25d..b0fefc4 100644
519 --- a/drivers/scsi/hpsa.c
520 +++ b/drivers/scsi/hpsa.c
521 @@ -548,12 +548,42 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
522 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
523 }
524
525 +static int is_firmware_flash_cmd(u8 *cdb)
526 +{
527 + return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
528 +}
529 +
530 +/*
531 + * During firmware flash, the heartbeat register may not update as frequently
532 + * as it should. So we dial down lockup detection during firmware flash. and
533 + * dial it back up when firmware flash completes.
534 + */
535 +#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
536 +#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
537 +static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
538 + struct CommandList *c)
539 +{
540 + if (!is_firmware_flash_cmd(c->Request.CDB))
541 + return;
542 + atomic_inc(&h->firmware_flash_in_progress);
543 + h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
544 +}
545 +
546 +static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
547 + struct CommandList *c)
548 +{
549 + if (is_firmware_flash_cmd(c->Request.CDB) &&
550 + atomic_dec_and_test(&h->firmware_flash_in_progress))
551 + h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
552 +}
553 +
554 static void enqueue_cmd_and_start_io(struct ctlr_info *h,
555 struct CommandList *c)
556 {
557 unsigned long flags;
558
559 set_performant_mode(h, c);
560 + dial_down_lockup_detection_during_fw_flash(h, c);
561 spin_lock_irqsave(&h->lock, flags);
562 addQ(&h->reqQ, c);
563 h->Qdepth++;
564 @@ -3049,6 +3079,7 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
565 static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
566 {
567 removeQ(c);
568 + dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
569 if (likely(c->cmd_type == CMD_SCSI))
570 complete_scsi_command(c);
571 else if (c->cmd_type == CMD_IOCTL_PEND)
572 @@ -4189,9 +4220,6 @@ static void controller_lockup_detected(struct ctlr_info *h)
573 spin_unlock_irqrestore(&h->lock, flags);
574 }
575
576 -#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ)
577 -#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2)
578 -
579 static void detect_controller_lockup(struct ctlr_info *h)
580 {
581 u64 now;
582 @@ -4202,7 +4230,7 @@ static void detect_controller_lockup(struct ctlr_info *h)
583 now = get_jiffies_64();
584 /* If we've received an interrupt recently, we're ok. */
585 if (time_after64(h->last_intr_timestamp +
586 - (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
587 + (h->heartbeat_sample_interval), now))
588 return;
589
590 /*
591 @@ -4211,7 +4239,7 @@ static void detect_controller_lockup(struct ctlr_info *h)
592 * otherwise don't care about signals in this thread.
593 */
594 if (time_after64(h->last_heartbeat_timestamp +
595 - (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
596 + (h->heartbeat_sample_interval), now))
597 return;
598
599 /* If heartbeat has not changed since we last looked, we're not ok. */
600 @@ -4253,6 +4281,7 @@ static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
601 {
602 unsigned long flags;
603
604 + h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
605 spin_lock_irqsave(&lockup_detector_lock, flags);
606 list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
607 spin_unlock_irqrestore(&lockup_detector_lock, flags);
608 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
609 index 7b28d54..6f30a6f 100644
610 --- a/drivers/scsi/hpsa.h
611 +++ b/drivers/scsi/hpsa.h
612 @@ -123,6 +123,8 @@ struct ctlr_info {
613 u64 last_intr_timestamp;
614 u32 last_heartbeat;
615 u64 last_heartbeat_timestamp;
616 + u32 heartbeat_sample_interval;
617 + atomic_t firmware_flash_in_progress;
618 u32 lockup_detected;
619 struct list_head lockup_list;
620 };
621 diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
622 index 8049815..cdd742e 100644
623 --- a/drivers/scsi/hpsa_cmd.h
624 +++ b/drivers/scsi/hpsa_cmd.h
625 @@ -162,6 +162,7 @@ struct SenseSubsystem_info {
626 #define BMIC_WRITE 0x27
627 #define BMIC_CACHE_FLUSH 0xc2
628 #define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
629 +#define BMIC_FLASH_FIRMWARE 0xF7
630
631 /* Command List Structure */
632 union SCSI3Addr {
633 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
634 index 182d5a5..f4cc413 100644
635 --- a/drivers/scsi/scsi_debug.c
636 +++ b/drivers/scsi/scsi_debug.c
637 @@ -2054,7 +2054,7 @@ static void unmap_region(sector_t lba, unsigned int len)
638 block = lba + alignment;
639 rem = do_div(block, granularity);
640
641 - if (rem == 0 && lba + granularity <= end && block < map_size) {
642 + if (rem == 0 && lba + granularity < end && block < map_size) {
643 clear_bit(block, map_storep);
644 if (scsi_debug_lbprz)
645 memset(fake_storep +
646 diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
647 index 83a1972..40a4570 100644
648 --- a/drivers/scsi/storvsc_drv.c
649 +++ b/drivers/scsi/storvsc_drv.c
650 @@ -1211,7 +1211,12 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
651 /*
652 * At this point, all outstanding requests in the adapter
653 * should have been flushed out and return to us
654 + * There is a potential race here where the host may be in
655 + * the process of responding when we return from here.
656 + * Just wait for all in-transit packets to be accounted for
657 + * before we return from here.
658 */
659 + storvsc_wait_to_drain(stor_device);
660
661 return SUCCESS;
662 }
663 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
664 index e326d17..23e122a 100644
665 --- a/drivers/target/iscsi/iscsi_target.c
666 +++ b/drivers/target/iscsi/iscsi_target.c
667 @@ -3196,7 +3196,6 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
668 len += 1;
669
670 if ((len + payload_len) > buffer_len) {
671 - spin_unlock(&tiqn->tiqn_tpg_lock);
672 end_of_buf = 1;
673 goto eob;
674 }
675 @@ -3349,6 +3348,7 @@ static int iscsit_send_reject(
676 hdr->opcode = ISCSI_OP_REJECT;
677 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
678 hton24(hdr->dlength, ISCSI_HDR_LEN);
679 + hdr->ffffffff = 0xffffffff;
680 cmd->stat_sn = conn->stat_sn++;
681 hdr->statsn = cpu_to_be32(cmd->stat_sn);
682 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
683 diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
684 index d1c4bc2..1596aec 100644
685 --- a/drivers/target/iscsi/iscsi_target_core.h
686 +++ b/drivers/target/iscsi/iscsi_target_core.h
687 @@ -25,10 +25,10 @@
688 #define NA_DATAOUT_TIMEOUT_RETRIES 5
689 #define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
690 #define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
691 -#define NA_NOPIN_TIMEOUT 5
692 +#define NA_NOPIN_TIMEOUT 15
693 #define NA_NOPIN_TIMEOUT_MAX 60
694 #define NA_NOPIN_TIMEOUT_MIN 3
695 -#define NA_NOPIN_RESPONSE_TIMEOUT 5
696 +#define NA_NOPIN_RESPONSE_TIMEOUT 30
697 #define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60
698 #define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3
699 #define NA_RANDOM_DATAIN_PDU_OFFSETS 0
700 diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
701 index 879d8d0..c3d7bf54 100644
702 --- a/drivers/target/iscsi/iscsi_target_tpg.c
703 +++ b/drivers/target/iscsi/iscsi_target_tpg.c
704 @@ -672,6 +672,12 @@ int iscsit_ta_generate_node_acls(
705 pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
706 tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
707
708 + if (flag == 1 && a->cache_dynamic_acls == 0) {
709 + pr_debug("Explicitly setting cache_dynamic_acls=1 when "
710 + "generate_node_acls=1\n");
711 + a->cache_dynamic_acls = 1;
712 + }
713 +
714 return 0;
715 }
716
717 @@ -711,6 +717,12 @@ int iscsit_ta_cache_dynamic_acls(
718 return -EINVAL;
719 }
720
721 + if (a->generate_node_acls == 1 && flag == 0) {
722 + pr_debug("Skipping cache_dynamic_acls=0 when"
723 + " generate_node_acls=1\n");
724 + return 0;
725 + }
726 +
727 a->cache_dynamic_acls = flag;
728 pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
729 " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
730 diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
731 index cbb6653..3f90d4b 100644
732 --- a/drivers/target/target_core_configfs.c
733 +++ b/drivers/target/target_core_configfs.c
734 @@ -3115,6 +3115,7 @@ static int __init target_core_init_configfs(void)
735 GFP_KERNEL);
736 if (!target_cg->default_groups) {
737 pr_err("Unable to allocate target_cg->default_groups\n");
738 + ret = -ENOMEM;
739 goto out_global;
740 }
741
742 @@ -3130,6 +3131,7 @@ static int __init target_core_init_configfs(void)
743 GFP_KERNEL);
744 if (!hba_cg->default_groups) {
745 pr_err("Unable to allocate hba_cg->default_groups\n");
746 + ret = -ENOMEM;
747 goto out_global;
748 }
749 config_group_init_type_name(&alua_group,
750 @@ -3145,6 +3147,7 @@ static int __init target_core_init_configfs(void)
751 GFP_KERNEL);
752 if (!alua_cg->default_groups) {
753 pr_err("Unable to allocate alua_cg->default_groups\n");
754 + ret = -ENOMEM;
755 goto out_global;
756 }
757
758 @@ -3156,14 +3159,17 @@ static int __init target_core_init_configfs(void)
759 * Add core/alua/lu_gps/default_lu_gp
760 */
761 lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
762 - if (IS_ERR(lu_gp))
763 + if (IS_ERR(lu_gp)) {
764 + ret = -ENOMEM;
765 goto out_global;
766 + }
767
768 lu_gp_cg = &alua_lu_gps_group;
769 lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
770 GFP_KERNEL);
771 if (!lu_gp_cg->default_groups) {
772 pr_err("Unable to allocate lu_gp_cg->default_groups\n");
773 + ret = -ENOMEM;
774 goto out_global;
775 }
776
777 diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
778 index 2156188..18d06be 100644
779 --- a/drivers/tty/vt/vt.c
780 +++ b/drivers/tty/vt/vt.c
781 @@ -3475,6 +3475,19 @@ int con_debug_enter(struct vc_data *vc)
782 kdb_set(2, setargs);
783 }
784 }
785 + if (vc->vc_cols < 999) {
786 + int colcount;
787 + char cols[4];
788 + const char *setargs[3] = {
789 + "set",
790 + "COLUMNS",
791 + cols,
792 + };
793 + if (kdbgetintenv(setargs[0], &colcount)) {
794 + snprintf(cols, 4, "%i", vc->vc_cols);
795 + kdb_set(2, setargs);
796 + }
797 + }
798 #endif /* CONFIG_KGDB_KDB */
799 return ret;
800 }
801 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
802 index 1b632cb..7f2fac1 100644
803 --- a/drivers/usb/class/cdc-acm.c
804 +++ b/drivers/usb/class/cdc-acm.c
805 @@ -1551,6 +1551,9 @@ static const struct usb_device_id acm_ids[] = {
806 Maybe we should define a new
807 quirk for this. */
808 },
809 + { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
810 + .driver_info = NO_UNION_NORMAL,
811 + },
812 { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
813 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
814 },
815 diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
816 index 9d7bcd9..be6952e 100644
817 --- a/drivers/usb/gadget/at91_udc.c
818 +++ b/drivers/usb/gadget/at91_udc.c
819 @@ -1735,7 +1735,7 @@ static int __devinit at91udc_probe(struct platform_device *pdev)
820 int retval;
821 struct resource *res;
822
823 - if (!dev->platform_data) {
824 + if (!dev->platform_data && !pdev->dev.of_node) {
825 /* small (so we copy it) but critical! */
826 DBG("missing platform_data\n");
827 return -ENODEV;
828 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
829 index a159b63..85d8110 100644
830 --- a/drivers/video/udlfb.c
831 +++ b/drivers/video/udlfb.c
832 @@ -647,7 +647,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
833 result = fb_sys_write(info, buf, count, ppos);
834
835 if (result > 0) {
836 - int start = max((int)(offset / info->fix.line_length) - 1, 0);
837 + int start = max((int)(offset / info->fix.line_length), 0);
838 int lines = min((u32)((result / info->fix.line_length) + 1),
839 (u32)info->var.yres);
840
841 diff --git a/drivers/video/via/via_clock.c b/drivers/video/via/via_clock.c
842 index af8f26b..db1e392 100644
843 --- a/drivers/video/via/via_clock.c
844 +++ b/drivers/video/via/via_clock.c
845 @@ -25,6 +25,7 @@
846
847 #include <linux/kernel.h>
848 #include <linux/via-core.h>
849 +#include <asm/olpc.h>
850 #include "via_clock.h"
851 #include "global.h"
852 #include "debug.h"
853 @@ -289,6 +290,10 @@ static void dummy_set_pll(struct via_pll_config config)
854 printk(KERN_INFO "Using undocumented set PLL.\n%s", via_slap);
855 }
856
857 +static void noop_set_clock_state(u8 state)
858 +{
859 +}
860 +
861 void via_clock_init(struct via_clock *clock, int gfx_chip)
862 {
863 switch (gfx_chip) {
864 @@ -346,4 +351,18 @@ void via_clock_init(struct via_clock *clock, int gfx_chip)
865 break;
866
867 }
868 +
869 + if (machine_is_olpc()) {
870 + /* The OLPC XO-1.5 cannot suspend/resume reliably if the
871 + * IGA1/IGA2 clocks are set as on or off (memory rot
872 + * occasionally happens during suspend under such
873 + * configurations).
874 + *
875 + * The only known stable scenario is to leave this bits as-is,
876 + * which in their default states are documented to enable the
877 + * clock only when it is needed.
878 + */
879 + clock->set_primary_clock_state = noop_set_clock_state;
880 + clock->set_secondary_clock_state = noop_set_clock_state;
881 + }
882 }
883 diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
884 index 75e5f1c..8c4292f 100644
885 --- a/fs/autofs4/root.c
886 +++ b/fs/autofs4/root.c
887 @@ -392,10 +392,12 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
888 ino->flags |= AUTOFS_INF_PENDING;
889 spin_unlock(&sbi->fs_lock);
890 status = autofs4_mount_wait(dentry);
891 - if (status)
892 - return ERR_PTR(status);
893 spin_lock(&sbi->fs_lock);
894 ino->flags &= ~AUTOFS_INF_PENDING;
895 + if (status) {
896 + spin_unlock(&sbi->fs_lock);
897 + return ERR_PTR(status);
898 + }
899 }
900 done:
901 if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
902 diff --git a/fs/ceph/export.c b/fs/ceph/export.c
903 index fbb2a64..4098ccf 100644
904 --- a/fs/ceph/export.c
905 +++ b/fs/ceph/export.c
906 @@ -89,7 +89,7 @@ static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
907 * FIXME: we should try harder by querying the mds for the ino.
908 */
909 static struct dentry *__fh_to_dentry(struct super_block *sb,
910 - struct ceph_nfs_fh *fh)
911 + struct ceph_nfs_fh *fh, int fh_len)
912 {
913 struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
914 struct inode *inode;
915 @@ -97,6 +97,9 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
916 struct ceph_vino vino;
917 int err;
918
919 + if (fh_len < sizeof(*fh) / 4)
920 + return ERR_PTR(-ESTALE);
921 +
922 dout("__fh_to_dentry %llx\n", fh->ino);
923 vino.ino = fh->ino;
924 vino.snap = CEPH_NOSNAP;
925 @@ -140,7 +143,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
926 * convert connectable fh to dentry
927 */
928 static struct dentry *__cfh_to_dentry(struct super_block *sb,
929 - struct ceph_nfs_confh *cfh)
930 + struct ceph_nfs_confh *cfh, int fh_len)
931 {
932 struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
933 struct inode *inode;
934 @@ -148,6 +151,9 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
935 struct ceph_vino vino;
936 int err;
937
938 + if (fh_len < sizeof(*cfh) / 4)
939 + return ERR_PTR(-ESTALE);
940 +
941 dout("__cfh_to_dentry %llx (%llx/%x)\n",
942 cfh->ino, cfh->parent_ino, cfh->parent_name_hash);
943
944 @@ -197,9 +203,11 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb, struct fid *fid,
945 int fh_len, int fh_type)
946 {
947 if (fh_type == 1)
948 - return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw);
949 + return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw,
950 + fh_len);
951 else
952 - return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw);
953 + return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw,
954 + fh_len);
955 }
956
957 /*
958 @@ -220,6 +228,8 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
959
960 if (fh_type == 1)
961 return ERR_PTR(-ESTALE);
962 + if (fh_len < sizeof(*cfh) / 4)
963 + return ERR_PTR(-ESTALE);
964
965 pr_debug("fh_to_parent %llx/%d\n", cfh->parent_ino,
966 cfh->parent_name_hash);
967 diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
968 index 867b64c..56e3aa5 100644
969 --- a/fs/ecryptfs/ecryptfs_kernel.h
970 +++ b/fs/ecryptfs/ecryptfs_kernel.h
971 @@ -568,6 +568,8 @@ struct ecryptfs_open_req {
972 struct inode *ecryptfs_get_inode(struct inode *lower_inode,
973 struct super_block *sb);
974 void ecryptfs_i_size_init(const char *page_virt, struct inode *inode);
975 +int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
976 + struct inode *ecryptfs_inode);
977 int ecryptfs_decode_and_decrypt_filename(char **decrypted_name,
978 size_t *decrypted_name_size,
979 struct dentry *ecryptfs_dentry,
980 diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
981 index 2b17f2f..d45ba45 100644
982 --- a/fs/ecryptfs/file.c
983 +++ b/fs/ecryptfs/file.c
984 @@ -138,29 +138,50 @@ out:
985 return rc;
986 }
987
988 -static void ecryptfs_vma_close(struct vm_area_struct *vma)
989 -{
990 - filemap_write_and_wait(vma->vm_file->f_mapping);
991 -}
992 -
993 -static const struct vm_operations_struct ecryptfs_file_vm_ops = {
994 - .close = ecryptfs_vma_close,
995 - .fault = filemap_fault,
996 -};
997 +struct kmem_cache *ecryptfs_file_info_cache;
998
999 -static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma)
1000 +static int read_or_initialize_metadata(struct dentry *dentry)
1001 {
1002 + struct inode *inode = dentry->d_inode;
1003 + struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
1004 + struct ecryptfs_crypt_stat *crypt_stat;
1005 int rc;
1006
1007 - rc = generic_file_mmap(file, vma);
1008 + crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
1009 + mount_crypt_stat = &ecryptfs_superblock_to_private(
1010 + inode->i_sb)->mount_crypt_stat;
1011 + mutex_lock(&crypt_stat->cs_mutex);
1012 +
1013 + if (crypt_stat->flags & ECRYPTFS_POLICY_APPLIED &&
1014 + crypt_stat->flags & ECRYPTFS_KEY_VALID) {
1015 + rc = 0;
1016 + goto out;
1017 + }
1018 +
1019 + rc = ecryptfs_read_metadata(dentry);
1020 if (!rc)
1021 - vma->vm_ops = &ecryptfs_file_vm_ops;
1022 + goto out;
1023 +
1024 + if (mount_crypt_stat->flags & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED) {
1025 + crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
1026 + | ECRYPTFS_ENCRYPTED);
1027 + rc = 0;
1028 + goto out;
1029 + }
1030 +
1031 + if (!(mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED) &&
1032 + !i_size_read(ecryptfs_inode_to_lower(inode))) {
1033 + rc = ecryptfs_initialize_file(dentry, inode);
1034 + if (!rc)
1035 + goto out;
1036 + }
1037
1038 + rc = -EIO;
1039 +out:
1040 + mutex_unlock(&crypt_stat->cs_mutex);
1041 return rc;
1042 }
1043
1044 -struct kmem_cache *ecryptfs_file_info_cache;
1045 -
1046 /**
1047 * ecryptfs_open
1048 * @inode: inode speciying file to open
1049 @@ -236,32 +257,9 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
1050 rc = 0;
1051 goto out;
1052 }
1053 - mutex_lock(&crypt_stat->cs_mutex);
1054 - if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
1055 - || !(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
1056 - rc = ecryptfs_read_metadata(ecryptfs_dentry);
1057 - if (rc) {
1058 - ecryptfs_printk(KERN_DEBUG,
1059 - "Valid headers not found\n");
1060 - if (!(mount_crypt_stat->flags
1061 - & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) {
1062 - rc = -EIO;
1063 - printk(KERN_WARNING "Either the lower file "
1064 - "is not in a valid eCryptfs format, "
1065 - "or the key could not be retrieved. "
1066 - "Plaintext passthrough mode is not "
1067 - "enabled; returning -EIO\n");
1068 - mutex_unlock(&crypt_stat->cs_mutex);
1069 - goto out_put;
1070 - }
1071 - rc = 0;
1072 - crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
1073 - | ECRYPTFS_ENCRYPTED);
1074 - mutex_unlock(&crypt_stat->cs_mutex);
1075 - goto out;
1076 - }
1077 - }
1078 - mutex_unlock(&crypt_stat->cs_mutex);
1079 + rc = read_or_initialize_metadata(ecryptfs_dentry);
1080 + if (rc)
1081 + goto out_put;
1082 ecryptfs_printk(KERN_DEBUG, "inode w/ addr = [0x%p], i_ino = "
1083 "[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino,
1084 (unsigned long long)i_size_read(inode));
1085 @@ -277,8 +275,14 @@ out:
1086
1087 static int ecryptfs_flush(struct file *file, fl_owner_t td)
1088 {
1089 - return file->f_mode & FMODE_WRITE
1090 - ? filemap_write_and_wait(file->f_mapping) : 0;
1091 + struct file *lower_file = ecryptfs_file_to_lower(file);
1092 +
1093 + if (lower_file->f_op && lower_file->f_op->flush) {
1094 + filemap_write_and_wait(file->f_mapping);
1095 + return lower_file->f_op->flush(lower_file, td);
1096 + }
1097 +
1098 + return 0;
1099 }
1100
1101 static int ecryptfs_release(struct inode *inode, struct file *file)
1102 @@ -292,15 +296,7 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
1103 static int
1104 ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1105 {
1106 - int rc = 0;
1107 -
1108 - rc = generic_file_fsync(file, start, end, datasync);
1109 - if (rc)
1110 - goto out;
1111 - rc = vfs_fsync_range(ecryptfs_file_to_lower(file), start, end,
1112 - datasync);
1113 -out:
1114 - return rc;
1115 + return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
1116 }
1117
1118 static int ecryptfs_fasync(int fd, struct file *file, int flag)
1119 @@ -369,7 +365,7 @@ const struct file_operations ecryptfs_main_fops = {
1120 #ifdef CONFIG_COMPAT
1121 .compat_ioctl = ecryptfs_compat_ioctl,
1122 #endif
1123 - .mmap = ecryptfs_file_mmap,
1124 + .mmap = generic_file_mmap,
1125 .open = ecryptfs_open,
1126 .flush = ecryptfs_flush,
1127 .release = ecryptfs_release,
1128 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
1129 index 6f5fb1a..11030b2 100644
1130 --- a/fs/ecryptfs/inode.c
1131 +++ b/fs/ecryptfs/inode.c
1132 @@ -143,6 +143,31 @@ static int ecryptfs_interpose(struct dentry *lower_dentry,
1133 return 0;
1134 }
1135
1136 +static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
1137 + struct inode *inode)
1138 +{
1139 + struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
1140 + struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
1141 + struct dentry *lower_dir_dentry;
1142 + int rc;
1143 +
1144 + dget(lower_dentry);
1145 + lower_dir_dentry = lock_parent(lower_dentry);
1146 + rc = vfs_unlink(lower_dir_inode, lower_dentry);
1147 + if (rc) {
1148 + printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
1149 + goto out_unlock;
1150 + }
1151 + fsstack_copy_attr_times(dir, lower_dir_inode);
1152 + set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink);
1153 + inode->i_ctime = dir->i_ctime;
1154 + d_drop(dentry);
1155 +out_unlock:
1156 + unlock_dir(lower_dir_dentry);
1157 + dput(lower_dentry);
1158 + return rc;
1159 +}
1160 +
1161 /**
1162 * ecryptfs_do_create
1163 * @directory_inode: inode of the new file's dentry's parent in ecryptfs
1164 @@ -182,8 +207,10 @@ ecryptfs_do_create(struct inode *directory_inode,
1165 }
1166 inode = __ecryptfs_get_inode(lower_dentry->d_inode,
1167 directory_inode->i_sb);
1168 - if (IS_ERR(inode))
1169 + if (IS_ERR(inode)) {
1170 + vfs_unlink(lower_dir_dentry->d_inode, lower_dentry);
1171 goto out_lock;
1172 + }
1173 fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode);
1174 fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode);
1175 out_lock:
1176 @@ -200,8 +227,8 @@ out:
1177 *
1178 * Returns zero on success
1179 */
1180 -static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
1181 - struct inode *ecryptfs_inode)
1182 +int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
1183 + struct inode *ecryptfs_inode)
1184 {
1185 struct ecryptfs_crypt_stat *crypt_stat =
1186 &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
1187 @@ -265,7 +292,9 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
1188 * that this on disk file is prepared to be an ecryptfs file */
1189 rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode);
1190 if (rc) {
1191 - drop_nlink(ecryptfs_inode);
1192 + ecryptfs_do_unlink(directory_inode, ecryptfs_dentry,
1193 + ecryptfs_inode);
1194 + make_bad_inode(ecryptfs_inode);
1195 unlock_new_inode(ecryptfs_inode);
1196 iput(ecryptfs_inode);
1197 goto out;
1198 @@ -477,27 +506,7 @@ out_lock:
1199
1200 static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry)
1201 {
1202 - int rc = 0;
1203 - struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
1204 - struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
1205 - struct dentry *lower_dir_dentry;
1206 -
1207 - dget(lower_dentry);
1208 - lower_dir_dentry = lock_parent(lower_dentry);
1209 - rc = vfs_unlink(lower_dir_inode, lower_dentry);
1210 - if (rc) {
1211 - printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
1212 - goto out_unlock;
1213 - }
1214 - fsstack_copy_attr_times(dir, lower_dir_inode);
1215 - set_nlink(dentry->d_inode,
1216 - ecryptfs_inode_to_lower(dentry->d_inode)->i_nlink);
1217 - dentry->d_inode->i_ctime = dir->i_ctime;
1218 - d_drop(dentry);
1219 -out_unlock:
1220 - unlock_dir(lower_dir_dentry);
1221 - dput(lower_dentry);
1222 - return rc;
1223 + return ecryptfs_do_unlink(dir, dentry, dentry->d_inode);
1224 }
1225
1226 static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry,
1227 @@ -1007,12 +1016,6 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
1228 goto out;
1229 }
1230
1231 - if (S_ISREG(inode->i_mode)) {
1232 - rc = filemap_write_and_wait(inode->i_mapping);
1233 - if (rc)
1234 - goto out;
1235 - fsstack_copy_attr_all(inode, lower_inode);
1236 - }
1237 memcpy(&lower_ia, ia, sizeof(lower_ia));
1238 if (ia->ia_valid & ATTR_FILE)
1239 lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file);
1240 diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
1241 index 6895493..0f04d2e 100644
1242 --- a/fs/ecryptfs/main.c
1243 +++ b/fs/ecryptfs/main.c
1244 @@ -162,6 +162,7 @@ void ecryptfs_put_lower_file(struct inode *inode)
1245 inode_info = ecryptfs_inode_to_private(inode);
1246 if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count,
1247 &inode_info->lower_file_mutex)) {
1248 + filemap_write_and_wait(inode->i_mapping);
1249 fput(inode_info->lower_file);
1250 inode_info->lower_file = NULL;
1251 mutex_unlock(&inode_info->lower_file_mutex);
1252 diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
1253 index a46b3a8..bd1d57f 100644
1254 --- a/fs/ecryptfs/mmap.c
1255 +++ b/fs/ecryptfs/mmap.c
1256 @@ -66,18 +66,6 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
1257 {
1258 int rc;
1259
1260 - /*
1261 - * Refuse to write the page out if we are called from reclaim context
1262 - * since our writepage() path may potentially allocate memory when
1263 - * calling into the lower fs vfs_write() which may in turn invoke
1264 - * us again.
1265 - */
1266 - if (current->flags & PF_MEMALLOC) {
1267 - redirty_page_for_writepage(wbc, page);
1268 - rc = 0;
1269 - goto out;
1270 - }
1271 -
1272 rc = ecryptfs_encrypt_page(page);
1273 if (rc) {
1274 ecryptfs_printk(KERN_WARNING, "Error encrypting "
1275 @@ -498,7 +486,6 @@ static int ecryptfs_write_end(struct file *file,
1276 struct ecryptfs_crypt_stat *crypt_stat =
1277 &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
1278 int rc;
1279 - int need_unlock_page = 1;
1280
1281 ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
1282 "(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
1283 @@ -519,26 +506,26 @@ static int ecryptfs_write_end(struct file *file,
1284 "zeros in page with index = [0x%.16lx]\n", index);
1285 goto out;
1286 }
1287 - set_page_dirty(page);
1288 - unlock_page(page);
1289 - need_unlock_page = 0;
1290 + rc = ecryptfs_encrypt_page(page);
1291 + if (rc) {
1292 + ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
1293 + "index [0x%.16lx])\n", index);
1294 + goto out;
1295 + }
1296 if (pos + copied > i_size_read(ecryptfs_inode)) {
1297 i_size_write(ecryptfs_inode, pos + copied);
1298 ecryptfs_printk(KERN_DEBUG, "Expanded file size to "
1299 "[0x%.16llx]\n",
1300 (unsigned long long)i_size_read(ecryptfs_inode));
1301 - balance_dirty_pages_ratelimited(mapping);
1302 - rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
1303 - if (rc) {
1304 - printk(KERN_ERR "Error writing inode size to metadata; "
1305 - "rc = [%d]\n", rc);
1306 - goto out;
1307 - }
1308 }
1309 - rc = copied;
1310 + rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
1311 + if (rc)
1312 + printk(KERN_ERR "Error writing inode size to metadata; "
1313 + "rc = [%d]\n", rc);
1314 + else
1315 + rc = copied;
1316 out:
1317 - if (need_unlock_page)
1318 - unlock_page(page);
1319 + unlock_page(page);
1320 page_cache_release(page);
1321 return rc;
1322 }
1323 diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
1324 index 70ba891..fdef7f0 100644
1325 --- a/fs/gfs2/export.c
1326 +++ b/fs/gfs2/export.c
1327 @@ -168,6 +168,8 @@ static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
1328 case GFS2_SMALL_FH_SIZE:
1329 case GFS2_LARGE_FH_SIZE:
1330 case GFS2_OLD_FH_SIZE:
1331 + if (fh_len < GFS2_SMALL_FH_SIZE)
1332 + return NULL;
1333 this.no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32;
1334 this.no_formal_ino |= be32_to_cpu(fh[1]);
1335 this.no_addr = ((u64)be32_to_cpu(fh[2])) << 32;
1336 @@ -187,6 +189,8 @@ static struct dentry *gfs2_fh_to_parent(struct super_block *sb, struct fid *fid,
1337 switch (fh_type) {
1338 case GFS2_LARGE_FH_SIZE:
1339 case GFS2_OLD_FH_SIZE:
1340 + if (fh_len < GFS2_LARGE_FH_SIZE)
1341 + return NULL;
1342 parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32;
1343 parent.no_formal_ino |= be32_to_cpu(fh[5]);
1344 parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32;
1345 diff --git a/fs/isofs/export.c b/fs/isofs/export.c
1346 index dd4687f..516eb21 100644
1347 --- a/fs/isofs/export.c
1348 +++ b/fs/isofs/export.c
1349 @@ -179,7 +179,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb,
1350 {
1351 struct isofs_fid *ifid = (struct isofs_fid *)fid;
1352
1353 - if (fh_type != 2)
1354 + if (fh_len < 2 || fh_type != 2)
1355 return NULL;
1356
1357 return isofs_export_iget(sb,
1358 diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
1359 index f2b9a57..9626bc8 100644
1360 --- a/fs/jbd/commit.c
1361 +++ b/fs/jbd/commit.c
1362 @@ -86,7 +86,12 @@ nope:
1363 static void release_data_buffer(struct buffer_head *bh)
1364 {
1365 if (buffer_freed(bh)) {
1366 + WARN_ON_ONCE(buffer_dirty(bh));
1367 clear_buffer_freed(bh);
1368 + clear_buffer_mapped(bh);
1369 + clear_buffer_new(bh);
1370 + clear_buffer_req(bh);
1371 + bh->b_bdev = NULL;
1372 release_buffer_page(bh);
1373 } else
1374 put_bh(bh);
1375 @@ -853,17 +858,35 @@ restart_loop:
1376 * there's no point in keeping a checkpoint record for
1377 * it. */
1378
1379 - /* A buffer which has been freed while still being
1380 - * journaled by a previous transaction may end up still
1381 - * being dirty here, but we want to avoid writing back
1382 - * that buffer in the future after the "add to orphan"
1383 - * operation been committed, That's not only a performance
1384 - * gain, it also stops aliasing problems if the buffer is
1385 - * left behind for writeback and gets reallocated for another
1386 - * use in a different page. */
1387 - if (buffer_freed(bh) && !jh->b_next_transaction) {
1388 - clear_buffer_freed(bh);
1389 - clear_buffer_jbddirty(bh);
1390 + /*
1391 + * A buffer which has been freed while still being journaled by
1392 + * a previous transaction.
1393 + */
1394 + if (buffer_freed(bh)) {
1395 + /*
1396 + * If the running transaction is the one containing
1397 + * "add to orphan" operation (b_next_transaction !=
1398 + * NULL), we have to wait for that transaction to
1399 + * commit before we can really get rid of the buffer.
1400 + * So just clear b_modified to not confuse transaction
1401 + * credit accounting and refile the buffer to
1402 + * BJ_Forget of the running transaction. If the just
1403 + * committed transaction contains "add to orphan"
1404 + * operation, we can completely invalidate the buffer
1405 + * now. We are rather throughout in that since the
1406 + * buffer may be still accessible when blocksize <
1407 + * pagesize and it is attached to the last partial
1408 + * page.
1409 + */
1410 + jh->b_modified = 0;
1411 + if (!jh->b_next_transaction) {
1412 + clear_buffer_freed(bh);
1413 + clear_buffer_jbddirty(bh);
1414 + clear_buffer_mapped(bh);
1415 + clear_buffer_new(bh);
1416 + clear_buffer_req(bh);
1417 + bh->b_bdev = NULL;
1418 + }
1419 }
1420
1421 if (buffer_jbddirty(bh)) {
1422 diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
1423 index b2a7e52..841cabc 100644
1424 --- a/fs/jbd/transaction.c
1425 +++ b/fs/jbd/transaction.c
1426 @@ -1845,15 +1845,16 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
1427 * We're outside-transaction here. Either or both of j_running_transaction
1428 * and j_committing_transaction may be NULL.
1429 */
1430 -static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
1431 +static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
1432 + int partial_page)
1433 {
1434 transaction_t *transaction;
1435 struct journal_head *jh;
1436 int may_free = 1;
1437 - int ret;
1438
1439 BUFFER_TRACE(bh, "entry");
1440
1441 +retry:
1442 /*
1443 * It is safe to proceed here without the j_list_lock because the
1444 * buffers cannot be stolen by try_to_free_buffers as long as we are
1445 @@ -1881,10 +1882,18 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
1446 * clear the buffer dirty bit at latest at the moment when the
1447 * transaction marking the buffer as freed in the filesystem
1448 * structures is committed because from that moment on the
1449 - * buffer can be reallocated and used by a different page.
1450 + * block can be reallocated and used by a different page.
1451 * Since the block hasn't been freed yet but the inode has
1452 * already been added to orphan list, it is safe for us to add
1453 * the buffer to BJ_Forget list of the newest transaction.
1454 + *
1455 + * Also we have to clear buffer_mapped flag of a truncated buffer
1456 + * because the buffer_head may be attached to the page straddling
1457 + * i_size (can happen only when blocksize < pagesize) and thus the
1458 + * buffer_head can be reused when the file is extended again. So we end
1459 + * up keeping around invalidated buffers attached to transactions'
1460 + * BJ_Forget list just to stop checkpointing code from cleaning up
1461 + * the transaction this buffer was modified in.
1462 */
1463 transaction = jh->b_transaction;
1464 if (transaction == NULL) {
1465 @@ -1911,13 +1920,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
1466 * committed, the buffer won't be needed any
1467 * longer. */
1468 JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
1469 - ret = __dispose_buffer(jh,
1470 + may_free = __dispose_buffer(jh,
1471 journal->j_running_transaction);
1472 - journal_put_journal_head(jh);
1473 - spin_unlock(&journal->j_list_lock);
1474 - jbd_unlock_bh_state(bh);
1475 - spin_unlock(&journal->j_state_lock);
1476 - return ret;
1477 + goto zap_buffer;
1478 } else {
1479 /* There is no currently-running transaction. So the
1480 * orphan record which we wrote for this file must have
1481 @@ -1925,13 +1930,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
1482 * the committing transaction, if it exists. */
1483 if (journal->j_committing_transaction) {
1484 JBUFFER_TRACE(jh, "give to committing trans");
1485 - ret = __dispose_buffer(jh,
1486 + may_free = __dispose_buffer(jh,
1487 journal->j_committing_transaction);
1488 - journal_put_journal_head(jh);
1489 - spin_unlock(&journal->j_list_lock);
1490 - jbd_unlock_bh_state(bh);
1491 - spin_unlock(&journal->j_state_lock);
1492 - return ret;
1493 + goto zap_buffer;
1494 } else {
1495 /* The orphan record's transaction has
1496 * committed. We can cleanse this buffer */
1497 @@ -1952,10 +1953,24 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
1498 }
1499 /*
1500 * The buffer is committing, we simply cannot touch
1501 - * it. So we just set j_next_transaction to the
1502 - * running transaction (if there is one) and mark
1503 - * buffer as freed so that commit code knows it should
1504 - * clear dirty bits when it is done with the buffer.
1505 + * it. If the page is straddling i_size we have to wait
1506 + * for commit and try again.
1507 + */
1508 + if (partial_page) {
1509 + tid_t tid = journal->j_committing_transaction->t_tid;
1510 +
1511 + journal_put_journal_head(jh);
1512 + spin_unlock(&journal->j_list_lock);
1513 + jbd_unlock_bh_state(bh);
1514 + spin_unlock(&journal->j_state_lock);
1515 + log_wait_commit(journal, tid);
1516 + goto retry;
1517 + }
1518 + /*
1519 + * OK, buffer won't be reachable after truncate. We just set
1520 + * j_next_transaction to the running transaction (if there is
1521 + * one) and mark buffer as freed so that commit code knows it
1522 + * should clear dirty bits when it is done with the buffer.
1523 */
1524 set_buffer_freed(bh);
1525 if (journal->j_running_transaction && buffer_jbddirty(bh))
1526 @@ -1978,6 +1993,14 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
1527 }
1528
1529 zap_buffer:
1530 + /*
1531 + * This is tricky. Although the buffer is truncated, it may be reused
1532 + * if blocksize < pagesize and it is attached to the page straddling
1533 + * EOF. Since the buffer might have been added to BJ_Forget list of the
1534 + * running transaction, journal_get_write_access() won't clear
1535 + * b_modified and credit accounting gets confused. So clear b_modified
1536 + * here. */
1537 + jh->b_modified = 0;
1538 journal_put_journal_head(jh);
1539 zap_buffer_no_jh:
1540 spin_unlock(&journal->j_list_lock);
1541 @@ -2026,7 +2049,8 @@ void journal_invalidatepage(journal_t *journal,
1542 if (offset <= curr_off) {
1543 /* This block is wholly outside the truncation point */
1544 lock_buffer(bh);
1545 - may_free &= journal_unmap_buffer(journal, bh);
1546 + may_free &= journal_unmap_buffer(journal, bh,
1547 + offset > 0);
1548 unlock_buffer(bh);
1549 }
1550 curr_off = next_off;
1551 diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
1552 index 7ef14b3..606a8dd 100644
1553 --- a/fs/lockd/mon.c
1554 +++ b/fs/lockd/mon.c
1555 @@ -40,6 +40,7 @@ struct nsm_args {
1556 u32 proc;
1557
1558 char *mon_name;
1559 + char *nodename;
1560 };
1561
1562 struct nsm_res {
1563 @@ -94,6 +95,7 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
1564 .vers = 3,
1565 .proc = NLMPROC_NSM_NOTIFY,
1566 .mon_name = nsm->sm_mon_name,
1567 + .nodename = utsname()->nodename,
1568 };
1569 struct rpc_message msg = {
1570 .rpc_argp = &args,
1571 @@ -430,7 +432,7 @@ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
1572 {
1573 __be32 *p;
1574
1575 - encode_nsm_string(xdr, utsname()->nodename);
1576 + encode_nsm_string(xdr, argp->nodename);
1577 p = xdr_reserve_space(xdr, 4 + 4 + 4);
1578 *p++ = cpu_to_be32(argp->prog);
1579 *p++ = cpu_to_be32(argp->vers);
1580 diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
1581 index 322d11c..01b090d 100644
1582 --- a/fs/nfsd/nfs4idmap.c
1583 +++ b/fs/nfsd/nfs4idmap.c
1584 @@ -581,7 +581,7 @@ numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namel
1585 /* Just to make sure it's null-terminated: */
1586 memcpy(buf, name, namelen);
1587 buf[namelen] = '\0';
1588 - ret = kstrtouint(name, 10, id);
1589 + ret = kstrtouint(buf, 10, id);
1590 return ret == 0;
1591 }
1592
1593 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
1594 index e79c24e..a2f99d1 100644
1595 --- a/fs/nfsd/nfs4state.c
1596 +++ b/fs/nfsd/nfs4state.c
1597 @@ -3783,6 +3783,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1598 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
1599
1600 nfsd4_close_open_stateid(stp);
1601 + release_last_closed_stateid(oo);
1602 oo->oo_last_closed_stid = stp;
1603
1604 /* place unused nfs4_stateowners on so_close_lru list to be
1605 diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
1606 index 494c315..f99c1b4 100644
1607 --- a/fs/reiserfs/inode.c
1608 +++ b/fs/reiserfs/inode.c
1609 @@ -1573,8 +1573,10 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
1610 reiserfs_warning(sb, "reiserfs-13077",
1611 "nfsd/reiserfs, fhtype=%d, len=%d - odd",
1612 fh_type, fh_len);
1613 - fh_type = 5;
1614 + fh_type = fh_len;
1615 }
1616 + if (fh_len < 2)
1617 + return NULL;
1618
1619 return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
1620 (fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
1621 @@ -1583,6 +1585,8 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
1622 struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
1623 int fh_len, int fh_type)
1624 {
1625 + if (fh_type > fh_len)
1626 + fh_type = fh_len;
1627 if (fh_type < 4)
1628 return NULL;
1629
1630 diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
1631 index 558910f..5703fb8 100644
1632 --- a/fs/xfs/xfs_export.c
1633 +++ b/fs/xfs/xfs_export.c
1634 @@ -195,6 +195,9 @@ xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid,
1635 struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fid;
1636 struct inode *inode = NULL;
1637
1638 + if (fh_len < xfs_fileid_length(fileid_type))
1639 + return NULL;
1640 +
1641 switch (fileid_type) {
1642 case FILEID_INO32_GEN_PARENT:
1643 inode = xfs_nfs_get_inode(sb, fid->i32.parent_ino,
1644 diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
1645 index 4792320..41d9cfa 100644
1646 --- a/include/linux/netfilter/ipset/ip_set_timeout.h
1647 +++ b/include/linux/netfilter/ipset/ip_set_timeout.h
1648 @@ -30,6 +30,10 @@ ip_set_timeout_uget(struct nlattr *tb)
1649 {
1650 unsigned int timeout = ip_set_get_h32(tb);
1651
1652 + /* Normalize to fit into jiffies */
1653 + if (timeout > UINT_MAX/MSEC_PER_SEC)
1654 + timeout = UINT_MAX/MSEC_PER_SEC;
1655 +
1656 /* Userspace supplied TIMEOUT parameter: adjust crazy size */
1657 return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout;
1658 }
1659 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
1660 index 72522f0..2389959 100644
1661 --- a/include/net/ip_vs.h
1662 +++ b/include/net/ip_vs.h
1663 @@ -1356,7 +1356,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
1664 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1665
1666 if (!ct || !nf_ct_is_untracked(ct)) {
1667 - nf_reset(skb);
1668 + nf_conntrack_put(skb->nfct);
1669 skb->nfct = &nf_ct_untracked_get()->ct_general;
1670 skb->nfctinfo = IP_CT_NEW;
1671 nf_conntrack_get(skb->nfct);
1672 diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
1673 index a88fb69..ea6f8a4 100644
1674 --- a/include/net/netfilter/nf_conntrack_ecache.h
1675 +++ b/include/net/netfilter/nf_conntrack_ecache.h
1676 @@ -18,6 +18,7 @@ struct nf_conntrack_ecache {
1677 u16 ctmask; /* bitmask of ct events to be delivered */
1678 u16 expmask; /* bitmask of expect events to be delivered */
1679 u32 pid; /* netlink pid of destroyer */
1680 + struct timer_list timeout;
1681 };
1682
1683 static inline struct nf_conntrack_ecache *
1684 diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
1685 index bb9520f..572e604 100644
1686 --- a/kernel/debug/kdb/kdb_io.c
1687 +++ b/kernel/debug/kdb/kdb_io.c
1688 @@ -552,6 +552,7 @@ int vkdb_printf(const char *fmt, va_list ap)
1689 {
1690 int diag;
1691 int linecount;
1692 + int colcount;
1693 int logging, saved_loglevel = 0;
1694 int saved_trap_printk;
1695 int got_printf_lock = 0;
1696 @@ -584,6 +585,10 @@ int vkdb_printf(const char *fmt, va_list ap)
1697 if (diag || linecount <= 1)
1698 linecount = 24;
1699
1700 + diag = kdbgetintenv("COLUMNS", &colcount);
1701 + if (diag || colcount <= 1)
1702 + colcount = 80;
1703 +
1704 diag = kdbgetintenv("LOGGING", &logging);
1705 if (diag)
1706 logging = 0;
1707 @@ -690,7 +695,7 @@ kdb_printit:
1708 gdbstub_msg_write(kdb_buffer, retlen);
1709 } else {
1710 if (dbg_io_ops && !dbg_io_ops->is_console) {
1711 - len = strlen(kdb_buffer);
1712 + len = retlen;
1713 cp = kdb_buffer;
1714 while (len--) {
1715 dbg_io_ops->write_char(*cp);
1716 @@ -709,11 +714,29 @@ kdb_printit:
1717 printk(KERN_INFO "%s", kdb_buffer);
1718 }
1719
1720 - if (KDB_STATE(PAGER) && strchr(kdb_buffer, '\n'))
1721 - kdb_nextline++;
1722 + if (KDB_STATE(PAGER)) {
1723 + /*
1724 + * Check printed string to decide how to bump the
1725 + * kdb_nextline to control when the more prompt should
1726 + * show up.
1727 + */
1728 + int got = 0;
1729 + len = retlen;
1730 + while (len--) {
1731 + if (kdb_buffer[len] == '\n') {
1732 + kdb_nextline++;
1733 + got = 0;
1734 + } else if (kdb_buffer[len] == '\r') {
1735 + got = 0;
1736 + } else {
1737 + got++;
1738 + }
1739 + }
1740 + kdb_nextline += got / (colcount + 1);
1741 + }
1742
1743 /* check for having reached the LINES number of printed lines */
1744 - if (kdb_nextline == linecount) {
1745 + if (kdb_nextline >= linecount) {
1746 char buf1[16] = "";
1747 #if defined(CONFIG_SMP)
1748 char buf2[32];
1749 @@ -776,7 +799,7 @@ kdb_printit:
1750 kdb_grepping_flag = 0;
1751 kdb_printf("\n");
1752 } else if (buf1[0] == ' ') {
1753 - kdb_printf("\n");
1754 + kdb_printf("\r");
1755 suspend_grep = 1; /* for this recursion */
1756 } else if (buf1[0] == '\n') {
1757 kdb_nextline = linecount - 1;
1758 diff --git a/kernel/module.c b/kernel/module.c
1759 index 78ac6ec..61ea75e 100644
1760 --- a/kernel/module.c
1761 +++ b/kernel/module.c
1762 @@ -2729,6 +2729,10 @@ static int check_module_license_and_versions(struct module *mod)
1763 if (strcmp(mod->name, "driverloader") == 0)
1764 add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
1765
1766 + /* lve claims to be GPL but upstream won't provide source */
1767 + if (strcmp(mod->name, "lve") == 0)
1768 + add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
1769 +
1770 #ifdef CONFIG_MODVERSIONS
1771 if ((mod->num_syms && !mod->crcs)
1772 || (mod->num_gpl_syms && !mod->gpl_crcs)
1773 diff --git a/kernel/timer.c b/kernel/timer.c
1774 index a297ffc..6dfdb72 100644
1775 --- a/kernel/timer.c
1776 +++ b/kernel/timer.c
1777 @@ -63,6 +63,7 @@ EXPORT_SYMBOL(jiffies_64);
1778 #define TVR_SIZE (1 << TVR_BITS)
1779 #define TVN_MASK (TVN_SIZE - 1)
1780 #define TVR_MASK (TVR_SIZE - 1)
1781 +#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
1782
1783 struct tvec {
1784 struct list_head vec[TVN_SIZE];
1785 @@ -356,11 +357,12 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
1786 vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
1787 } else {
1788 int i;
1789 - /* If the timeout is larger than 0xffffffff on 64-bit
1790 - * architectures then we use the maximum timeout:
1791 + /* If the timeout is larger than MAX_TVAL (on 64-bit
1792 + * architectures or with CONFIG_BASE_SMALL=1) then we
1793 + * use the maximum timeout.
1794 */
1795 - if (idx > 0xffffffffUL) {
1796 - idx = 0xffffffffUL;
1797 + if (idx > MAX_TVAL) {
1798 + idx = MAX_TVAL;
1799 expires = idx + base->timer_jiffies;
1800 }
1801 i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
1802 diff --git a/mm/shmem.c b/mm/shmem.c
1803 index 9d65a02..40383cd 100644
1804 --- a/mm/shmem.c
1805 +++ b/mm/shmem.c
1806 @@ -2018,12 +2018,14 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
1807 {
1808 struct inode *inode;
1809 struct dentry *dentry = NULL;
1810 - u64 inum = fid->raw[2];
1811 - inum = (inum << 32) | fid->raw[1];
1812 + u64 inum;
1813
1814 if (fh_len < 3)
1815 return NULL;
1816
1817 + inum = fid->raw[2];
1818 + inum = (inum << 32) | fid->raw[1];
1819 +
1820 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
1821 shmem_match, fid->raw);
1822 if (inode) {
1823 diff --git a/net/core/pktgen.c b/net/core/pktgen.c
1824 index b81369b..8dae76f 100644
1825 --- a/net/core/pktgen.c
1826 +++ b/net/core/pktgen.c
1827 @@ -2932,7 +2932,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
1828 sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
1829 pkt_dev->pkt_overhead;
1830
1831 - if (datalen < sizeof(struct pktgen_hdr)) {
1832 + if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
1833 datalen = sizeof(struct pktgen_hdr);
1834 if (net_ratelimit())
1835 pr_info("increased datalen to %d\n", datalen);
1836 diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
1837 index 57932c4..566be2d 100644
1838 --- a/net/ipv4/netfilter/nf_nat_sip.c
1839 +++ b/net/ipv4/netfilter/nf_nat_sip.c
1840 @@ -148,7 +148,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
1841 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
1842 hdr, NULL, &matchoff, &matchlen,
1843 &addr, &port) > 0) {
1844 - unsigned int matchend, poff, plen, buflen, n;
1845 + unsigned int olen, matchend, poff, plen, buflen, n;
1846 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
1847
1848 /* We're only interested in headers related to this
1849 @@ -163,11 +163,12 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
1850 goto next;
1851 }
1852
1853 + olen = *datalen;
1854 if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
1855 &addr, port))
1856 return NF_DROP;
1857
1858 - matchend = matchoff + matchlen;
1859 + matchend = matchoff + matchlen + *datalen - olen;
1860
1861 /* The maddr= parameter (RFC 2361) specifies where to send
1862 * the reply. */
1863 @@ -501,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
1864 ret = nf_ct_expect_related(rtcp_exp);
1865 if (ret == 0)
1866 break;
1867 - else if (ret != -EBUSY) {
1868 + else if (ret == -EBUSY) {
1869 + nf_ct_unexpect_related(rtp_exp);
1870 + continue;
1871 + } else if (ret < 0) {
1872 nf_ct_unexpect_related(rtp_exp);
1873 port = 0;
1874 break;
1875 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
1876 index 97e7380..cbc5bfd 100644
1877 --- a/net/netfilter/ipvs/ip_vs_ctl.c
1878 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
1879 @@ -1521,11 +1521,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
1880 {
1881 struct net_device *dev = ptr;
1882 struct net *net = dev_net(dev);
1883 + struct netns_ipvs *ipvs = net_ipvs(net);
1884 struct ip_vs_service *svc;
1885 struct ip_vs_dest *dest;
1886 unsigned int idx;
1887
1888 - if (event != NETDEV_UNREGISTER)
1889 + if (event != NETDEV_UNREGISTER || !ipvs)
1890 return NOTIFY_DONE;
1891 IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
1892 EnterFunction(2);
1893 @@ -1551,7 +1552,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
1894 }
1895 }
1896
1897 - list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
1898 + list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
1899 __ip_vs_dev_reset(dest, dev);
1900 }
1901 mutex_unlock(&__ip_vs_mutex);
1902 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
1903 index 729f157..9a171b2 100644
1904 --- a/net/netfilter/nf_conntrack_core.c
1905 +++ b/net/netfilter/nf_conntrack_core.c
1906 @@ -249,12 +249,15 @@ static void death_by_event(unsigned long ul_conntrack)
1907 {
1908 struct nf_conn *ct = (void *)ul_conntrack;
1909 struct net *net = nf_ct_net(ct);
1910 + struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
1911 +
1912 + BUG_ON(ecache == NULL);
1913
1914 if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
1915 /* bad luck, let's retry again */
1916 - ct->timeout.expires = jiffies +
1917 + ecache->timeout.expires = jiffies +
1918 (random32() % net->ct.sysctl_events_retry_timeout);
1919 - add_timer(&ct->timeout);
1920 + add_timer(&ecache->timeout);
1921 return;
1922 }
1923 /* we've got the event delivered, now it's dying */
1924 @@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack)
1925 void nf_ct_insert_dying_list(struct nf_conn *ct)
1926 {
1927 struct net *net = nf_ct_net(ct);
1928 + struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
1929 +
1930 + BUG_ON(ecache == NULL);
1931
1932 /* add this conntrack to the dying list */
1933 spin_lock_bh(&nf_conntrack_lock);
1934 @@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
1935 &net->ct.dying);
1936 spin_unlock_bh(&nf_conntrack_lock);
1937 /* set a new timer to retry event delivery */
1938 - setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
1939 - ct->timeout.expires = jiffies +
1940 + setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
1941 + ecache->timeout.expires = jiffies +
1942 (random32() % net->ct.sysctl_events_retry_timeout);
1943 - add_timer(&ct->timeout);
1944 + add_timer(&ecache->timeout);
1945 }
1946 EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
1947
1948 diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
1949 index 4147ba3..e41ec84 100644
1950 --- a/net/netfilter/nf_conntrack_expect.c
1951 +++ b/net/netfilter/nf_conntrack_expect.c
1952 @@ -361,23 +361,6 @@ static void evict_oldest_expect(struct nf_conn *master,
1953 }
1954 }
1955
1956 -static inline int refresh_timer(struct nf_conntrack_expect *i)
1957 -{
1958 - struct nf_conn_help *master_help = nfct_help(i->master);
1959 - const struct nf_conntrack_expect_policy *p;
1960 -
1961 - if (!del_timer(&i->timeout))
1962 - return 0;
1963 -
1964 - p = &rcu_dereference_protected(
1965 - master_help->helper,
1966 - lockdep_is_held(&nf_conntrack_lock)
1967 - )->expect_policy[i->class];
1968 - i->timeout.expires = jiffies + p->timeout * HZ;
1969 - add_timer(&i->timeout);
1970 - return 1;
1971 -}
1972 -
1973 static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
1974 {
1975 const struct nf_conntrack_expect_policy *p;
1976 @@ -386,7 +369,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
1977 struct nf_conn_help *master_help = nfct_help(master);
1978 struct nf_conntrack_helper *helper;
1979 struct net *net = nf_ct_exp_net(expect);
1980 - struct hlist_node *n;
1981 + struct hlist_node *n, *next;
1982 unsigned int h;
1983 int ret = 1;
1984
1985 @@ -395,12 +378,12 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
1986 goto out;
1987 }
1988 h = nf_ct_expect_dst_hash(&expect->tuple);
1989 - hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
1990 + hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) {
1991 if (expect_matches(i, expect)) {
1992 - /* Refresh timer: if it's dying, ignore.. */
1993 - if (refresh_timer(i)) {
1994 - ret = 0;
1995 - goto out;
1996 + if (del_timer(&i->timeout)) {
1997 + nf_ct_unlink_expect(i);
1998 + nf_ct_expect_put(i);
1999 + break;
2000 }
2001 } else if (expect_clash(i, expect)) {
2002 ret = -EBUSY;
2003 diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
2004 index d95f9c9..2195eb0 100644
2005 --- a/net/netfilter/xt_hashlimit.c
2006 +++ b/net/netfilter/xt_hashlimit.c
2007 @@ -389,8 +389,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
2008 #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
2009
2010 /* Precision saver. */
2011 -static inline u_int32_t
2012 -user2credits(u_int32_t user)
2013 +static u32 user2credits(u32 user)
2014 {
2015 /* If multiplying would overflow... */
2016 if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
2017 @@ -400,7 +399,7 @@ user2credits(u_int32_t user)
2018 return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
2019 }
2020
2021 -static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
2022 +static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
2023 {
2024 dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY;
2025 if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
2026 @@ -535,8 +534,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
2027 dh->rateinfo.prev = jiffies;
2028 dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
2029 hinfo->cfg.burst);
2030 - dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
2031 - hinfo->cfg.burst);
2032 + dh->rateinfo.credit_cap = dh->rateinfo.credit;
2033 dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
2034 } else {
2035 /* update expiration timeout */
2036 diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
2037 index 32b7a57..a4c1e45 100644
2038 --- a/net/netfilter/xt_limit.c
2039 +++ b/net/netfilter/xt_limit.c
2040 @@ -88,8 +88,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
2041 }
2042
2043 /* Precision saver. */
2044 -static u_int32_t
2045 -user2credits(u_int32_t user)
2046 +static u32 user2credits(u32 user)
2047 {
2048 /* If multiplying would overflow... */
2049 if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
2050 @@ -118,12 +117,12 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
2051
2052 /* For SMP, we only want to use one set of state. */
2053 r->master = priv;
2054 + /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
2055 + 128. */
2056 + priv->prev = jiffies;
2057 + priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
2058 if (r->cost == 0) {
2059 - /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
2060 - 128. */
2061 - priv->prev = jiffies;
2062 - priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
2063 - r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
2064 + r->credit_cap = priv->credit; /* Credits full. */
2065 r->cost = user2credits(r->avg);
2066 }
2067 return 0;
2068 diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
2069 index 0ec8138..c6f7db7 100644
2070 --- a/net/netfilter/xt_set.c
2071 +++ b/net/netfilter/xt_set.c
2072 @@ -16,6 +16,7 @@
2073
2074 #include <linux/netfilter/x_tables.h>
2075 #include <linux/netfilter/xt_set.h>
2076 +#include <linux/netfilter/ipset/ip_set_timeout.h>
2077
2078 MODULE_LICENSE("GPL");
2079 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
2080 @@ -44,6 +45,14 @@ const struct ip_set_adt_opt n = { \
2081 .cmdflags = cfs, \
2082 .timeout = t, \
2083 }
2084 +#define ADT_MOPT(n, f, d, fs, cfs, t) \
2085 +struct ip_set_adt_opt n = { \
2086 + .family = f, \
2087 + .dim = d, \
2088 + .flags = fs, \
2089 + .cmdflags = cfs, \
2090 + .timeout = t, \
2091 +}
2092
2093 /* Revision 0 interface: backward compatible with netfilter/iptables */
2094
2095 @@ -296,11 +305,15 @@ static unsigned int
2096 set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
2097 {
2098 const struct xt_set_info_target_v2 *info = par->targinfo;
2099 - ADT_OPT(add_opt, par->family, info->add_set.dim,
2100 - info->add_set.flags, info->flags, info->timeout);
2101 + ADT_MOPT(add_opt, par->family, info->add_set.dim,
2102 + info->add_set.flags, info->flags, info->timeout);
2103 ADT_OPT(del_opt, par->family, info->del_set.dim,
2104 info->del_set.flags, 0, UINT_MAX);
2105
2106 + /* Normalize to fit into jiffies */
2107 + if (add_opt.timeout != IPSET_NO_TIMEOUT &&
2108 + add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
2109 + add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
2110 if (info->add_set.index != IPSET_INVALID_ID)
2111 ip_set_add(info->add_set.index, skb, par, &add_opt);
2112 if (info->del_set.index != IPSET_INVALID_ID)
2113 diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
2114 index b88c6bf..00ff343 100644
2115 --- a/net/sunrpc/xprtsock.c
2116 +++ b/net/sunrpc/xprtsock.c
2117 @@ -1028,6 +1028,16 @@ static void xs_udp_data_ready(struct sock *sk, int len)
2118 read_unlock_bh(&sk->sk_callback_lock);
2119 }
2120
2121 +/*
2122 + * Helper function to force a TCP close if the server is sending
2123 + * junk and/or it has put us in CLOSE_WAIT
2124 + */
2125 +static void xs_tcp_force_close(struct rpc_xprt *xprt)
2126 +{
2127 + set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
2128 + xprt_force_disconnect(xprt);
2129 +}
2130 +
2131 static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
2132 {
2133 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2134 @@ -1054,7 +1064,7 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea
2135 /* Sanity check of the record length */
2136 if (unlikely(transport->tcp_reclen < 8)) {
2137 dprintk("RPC: invalid TCP record fragment length\n");
2138 - xprt_force_disconnect(xprt);
2139 + xs_tcp_force_close(xprt);
2140 return;
2141 }
2142 dprintk("RPC: reading TCP record fragment of length %d\n",
2143 @@ -1135,7 +1145,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
2144 break;
2145 default:
2146 dprintk("RPC: invalid request message type\n");
2147 - xprt_force_disconnect(&transport->xprt);
2148 + xs_tcp_force_close(&transport->xprt);
2149 }
2150 xs_tcp_check_fraghdr(transport);
2151 }
2152 @@ -1458,6 +1468,8 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
2153 static void xs_sock_mark_closed(struct rpc_xprt *xprt)
2154 {
2155 smp_mb__before_clear_bit();
2156 + clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
2157 + clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
2158 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
2159 clear_bit(XPRT_CLOSING, &xprt->state);
2160 smp_mb__after_clear_bit();
2161 @@ -1515,8 +1527,8 @@ static void xs_tcp_state_change(struct sock *sk)
2162 break;
2163 case TCP_CLOSE_WAIT:
2164 /* The server initiated a shutdown of the socket */
2165 - xprt_force_disconnect(xprt);
2166 xprt->connect_cookie++;
2167 + xs_tcp_force_close(xprt);
2168 case TCP_CLOSING:
2169 /*
2170 * If the server closed down the connection, make sure that
2171 @@ -2159,8 +2171,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
2172 /* We're probably in TIME_WAIT. Get rid of existing socket,
2173 * and retry
2174 */
2175 - set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
2176 - xprt_force_disconnect(xprt);
2177 + xs_tcp_force_close(xprt);
2178 break;
2179 case -ECONNREFUSED:
2180 case -ECONNRESET:
2181 diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
2182 index 9473fca..8b0f996 100644
2183 --- a/sound/pci/ac97/ac97_codec.c
2184 +++ b/sound/pci/ac97/ac97_codec.c
2185 @@ -1271,6 +1271,8 @@ static int snd_ac97_cvol_new(struct snd_card *card, char *name, int reg, unsigne
2186 tmp.index = ac97->num;
2187 kctl = snd_ctl_new1(&tmp, ac97);
2188 }
2189 + if (!kctl)
2190 + return -ENOMEM;
2191 if (reg >= AC97_PHONE && reg <= AC97_PCM)
2192 set_tlv_db_scale(kctl, db_scale_5bit_12db_max);
2193 else
2194 diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
2195 index 7549240..a78fdf4 100644
2196 --- a/sound/pci/emu10k1/emu10k1_main.c
2197 +++ b/sound/pci/emu10k1/emu10k1_main.c
2198 @@ -1416,6 +1416,15 @@ static struct snd_emu_chip_details emu_chip_details[] = {
2199 .ca0108_chip = 1,
2200 .spk71 = 1,
2201 .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 new revision */
2202 + /* Tested by Maxim Kachur <mcdebugger@duganet.ru> 17th Oct 2012. */
2203 + /* This is MAEM8986, 0202 is MAEM8980 */
2204 + {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40071102,
2205 + .driver = "Audigy2", .name = "E-mu 1010 PCIe [MAEM8986]",
2206 + .id = "EMU1010",
2207 + .emu10k2_chip = 1,
2208 + .ca0108_chip = 1,
2209 + .spk71 = 1,
2210 + .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 PCIe */
2211 /* Tested by James@superbug.co.uk 8th July 2005. */
2212 /* This is MAEM8810, 0202 is MAEM8820 */
2213 {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x40011102,
2214 diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
2215 index c83ccdb..2bc6c51 100644
2216 --- a/sound/pci/hda/patch_cirrus.c
2217 +++ b/sound/pci/hda/patch_cirrus.c
2218 @@ -1427,7 +1427,7 @@ static int patch_cs420x(struct hda_codec *codec)
2219 return 0;
2220
2221 error:
2222 - kfree(codec->spec);
2223 + cs_free(codec);
2224 codec->spec = NULL;
2225 return err;
2226 }
2227 @@ -1984,7 +1984,7 @@ static int patch_cs4210(struct hda_codec *codec)
2228 return 0;
2229
2230 error:
2231 - kfree(codec->spec);
2232 + cs_free(codec);
2233 codec->spec = NULL;
2234 return err;
2235 }
2236 @@ -2009,7 +2009,7 @@ static int patch_cs4213(struct hda_codec *codec)
2237 return 0;
2238
2239 error:
2240 - kfree(codec->spec);
2241 + cs_free(codec);
2242 codec->spec = NULL;
2243 return err;
2244 }
2245 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2246 index e7cb4bd..94b765b 100644
2247 --- a/sound/pci/hda/patch_realtek.c
2248 +++ b/sound/pci/hda/patch_realtek.c
2249 @@ -602,6 +602,8 @@ static void alc_line_automute(struct hda_codec *codec)
2250 {
2251 struct alc_spec *spec = codec->spec;
2252
2253 + if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT)
2254 + return;
2255 /* check LO jack only when it's different from HP */
2256 if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0])
2257 return;
2258 @@ -2663,8 +2665,10 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch,
2259 return "PCM";
2260 break;
2261 }
2262 - if (snd_BUG_ON(ch >= ARRAY_SIZE(channel_name)))
2263 + if (ch >= ARRAY_SIZE(channel_name)) {
2264 + snd_BUG();
2265 return "PCM";
2266 + }
2267
2268 return channel_name[ch];
2269 }
2270 diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
2271 index 32682c1..c8bff6d 100644
2272 --- a/sound/soc/codecs/wm2200.c
2273 +++ b/sound/soc/codecs/wm2200.c
2274 @@ -1028,7 +1028,7 @@ SOC_DOUBLE_R_TLV("OUT2 Digital Volume", WM2200_DAC_DIGITAL_VOLUME_2L,
2275 WM2200_DAC_DIGITAL_VOLUME_2R, WM2200_OUT2L_VOL_SHIFT, 0x9f, 0,
2276 digital_tlv),
2277 SOC_DOUBLE("OUT2 Switch", WM2200_PDM_1, WM2200_SPK1L_MUTE_SHIFT,
2278 - WM2200_SPK1R_MUTE_SHIFT, 1, 0),
2279 + WM2200_SPK1R_MUTE_SHIFT, 1, 1),
2280 };
2281
2282 WM2200_MIXER_ENUMS(OUT1L, WM2200_OUT1LMIX_INPUT_1_SOURCE);
2283 @@ -2091,6 +2091,7 @@ static __devinit int wm2200_i2c_probe(struct i2c_client *i2c,
2284
2285 switch (wm2200->rev) {
2286 case 0:
2287 + case 1:
2288 ret = regmap_register_patch(wm2200->regmap, wm2200_reva_patch,
2289 ARRAY_SIZE(wm2200_reva_patch));
2290 if (ret != 0) {
2291 diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
2292 index 93bb8ee..9c2f090 100644
2293 --- a/sound/soc/omap/omap-abe-twl6040.c
2294 +++ b/sound/soc/omap/omap-abe-twl6040.c
2295 @@ -181,7 +181,7 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
2296 twl6040_disconnect_pin(dapm, pdata->has_hf, "Ext Spk");
2297 twl6040_disconnect_pin(dapm, pdata->has_ep, "Earphone Spk");
2298 twl6040_disconnect_pin(dapm, pdata->has_aux, "Line Out");
2299 - twl6040_disconnect_pin(dapm, pdata->has_vibra, "Vinrator");
2300 + twl6040_disconnect_pin(dapm, pdata->has_vibra, "Vibrator");
2301 twl6040_disconnect_pin(dapm, pdata->has_hsmic, "Headset Mic");
2302 twl6040_disconnect_pin(dapm, pdata->has_mainmic, "Main Handset Mic");
2303 twl6040_disconnect_pin(dapm, pdata->has_submic, "Sub Handset Mic");
2304 diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
2305 index 74ed2df..91b7287 100644
2306 --- a/sound/soc/sh/fsi.c
2307 +++ b/sound/soc/sh/fsi.c
2308 @@ -20,6 +20,7 @@
2309 #include <linux/sh_dma.h>
2310 #include <linux/slab.h>
2311 #include <linux/module.h>
2312 +#include <linux/workqueue.h>
2313 #include <sound/soc.h>
2314 #include <sound/sh_fsi.h>
2315
2316 @@ -199,7 +200,7 @@ struct fsi_stream {
2317 */
2318 struct dma_chan *chan;
2319 struct sh_dmae_slave slave; /* see fsi_handler_init() */
2320 - struct tasklet_struct tasklet;
2321 + struct work_struct work;
2322 dma_addr_t dma;
2323 };
2324
2325 @@ -968,9 +969,9 @@ static dma_addr_t fsi_dma_get_area(struct fsi_stream *io)
2326 return io->dma + samples_to_bytes(runtime, io->buff_sample_pos);
2327 }
2328
2329 -static void fsi_dma_do_tasklet(unsigned long data)
2330 +static void fsi_dma_do_work(struct work_struct *work)
2331 {
2332 - struct fsi_stream *io = (struct fsi_stream *)data;
2333 + struct fsi_stream *io = container_of(work, struct fsi_stream, work);
2334 struct fsi_priv *fsi = fsi_stream_to_priv(io);
2335 struct dma_chan *chan;
2336 struct snd_soc_dai *dai;
2337 @@ -1023,7 +1024,7 @@ static void fsi_dma_do_tasklet(unsigned long data)
2338 * FIXME
2339 *
2340 * In DMAEngine case, codec and FSI cannot be started simultaneously
2341 - * since FSI is using tasklet.
2342 + * since FSI is using the scheduler work queue.
2343 * Therefore, in capture case, probably FSI FIFO will have got
2344 * overflow error in this point.
2345 * in that case, DMA cannot start transfer until error was cleared.
2346 @@ -1047,7 +1048,7 @@ static bool fsi_dma_filter(struct dma_chan *chan, void *param)
2347
2348 static int fsi_dma_transfer(struct fsi_priv *fsi, struct fsi_stream *io)
2349 {
2350 - tasklet_schedule(&io->tasklet);
2351 + schedule_work(&io->work);
2352
2353 return 0;
2354 }
2355 @@ -1087,14 +1088,14 @@ static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io)
2356 if (!io->chan)
2357 return -EIO;
2358
2359 - tasklet_init(&io->tasklet, fsi_dma_do_tasklet, (unsigned long)io);
2360 + INIT_WORK(&io->work, fsi_dma_do_work);
2361
2362 return 0;
2363 }
2364
2365 static int fsi_dma_remove(struct fsi_priv *fsi, struct fsi_stream *io)
2366 {
2367 - tasklet_kill(&io->tasklet);
2368 + cancel_work_sync(&io->work);
2369
2370 fsi_stream_stop(fsi, io);
2371

  ViewVC Help
Powered by ViewVC 1.1.20