/[linux-patches]/genpatches-2.6/trunk/2.6.12-pre/1006_linux-2.6.11.7.patch
Gentoo

Contents of /genpatches-2.6/trunk/2.6.12-pre/1006_linux-2.6.11.7.patch

Parent Directory | Revision Log


Revision 4 - (show annotations) (download) (as text)
Sat Jun 11 23:20:41 2005 UTC (15 years, 5 months ago) by dsd
File MIME type: text/x-diff
File size: 11391 byte(s)
Create 2.6.12 branch
1 diff -Nru a/Makefile b/Makefile
2 --- a/Makefile 2005-04-07 11:59:01 -07:00
3 +++ b/Makefile 2005-04-07 11:59:01 -07:00
4 @@ -1,7 +1,7 @@
5 VERSION = 2
6 PATCHLEVEL = 6
7 SUBLEVEL = 11
8 -EXTRAVERSION = .6
9 +EXTRAVERSION = .7
10 NAME=Woozy Numbat
11
12 # *DOCUMENTATION*
13 diff -Nru a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
14 --- a/arch/ia64/kernel/fsys.S 2005-04-07 11:59:01 -07:00
15 +++ b/arch/ia64/kernel/fsys.S 2005-04-07 11:59:01 -07:00
16 @@ -611,8 +611,10 @@
17 movl r2=ia64_ret_from_syscall
18 ;;
19 mov rp=r2 // set the real return addr
20 - tbit.z p8,p0=r3,TIF_SYSCALL_TRACE
21 + and r3=_TIF_SYSCALL_TRACEAUDIT,r3
22 ;;
23 + cmp.eq p8,p0=r3,r0
24 +
25 (p10) br.cond.spnt.many ia64_ret_from_syscall // p10==true means out registers are more than 8
26 (p8) br.call.sptk.many b6=b6 // ignore this return addr
27 br.cond.sptk ia64_trace_syscall
28 diff -Nru a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
29 --- a/arch/ia64/kernel/signal.c 2005-04-07 11:59:01 -07:00
30 +++ b/arch/ia64/kernel/signal.c 2005-04-07 11:59:01 -07:00
31 @@ -224,7 +224,8 @@
32 * could be corrupted.
33 */
34 retval = (long) &ia64_leave_kernel;
35 - if (test_thread_flag(TIF_SYSCALL_TRACE))
36 + if (test_thread_flag(TIF_SYSCALL_TRACE)
37 + || test_thread_flag(TIF_SYSCALL_AUDIT))
38 /*
39 * strace expects to be notified after sigreturn returns even though the
40 * context to which we return may not be in the middle of a syscall.
41 diff -Nru a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
42 --- a/arch/um/kernel/skas/uaccess.c 2005-04-07 11:59:01 -07:00
43 +++ b/arch/um/kernel/skas/uaccess.c 2005-04-07 11:59:01 -07:00
44 @@ -61,7 +61,8 @@
45 void *arg;
46 int *res;
47
48 - va_copy(args, *(va_list *)arg_ptr);
49 + /* Some old gccs recognize __va_copy, but not va_copy */
50 + __va_copy(args, *(va_list *)arg_ptr);
51 addr = va_arg(args, unsigned long);
52 len = va_arg(args, int);
53 is_write = va_arg(args, int);
54 diff -Nru a/drivers/i2c/chips/eeprom.c b/drivers/i2c/chips/eeprom.c
55 --- a/drivers/i2c/chips/eeprom.c 2005-04-07 11:59:01 -07:00
56 +++ b/drivers/i2c/chips/eeprom.c 2005-04-07 11:59:01 -07:00
57 @@ -130,7 +130,8 @@
58
59 /* Hide Vaio security settings to regular users (16 first bytes) */
60 if (data->nature == VAIO && off < 16 && !capable(CAP_SYS_ADMIN)) {
61 - int in_row1 = 16 - off;
62 + size_t in_row1 = 16 - off;
63 + in_row1 = min(in_row1, count);
64 memset(buf, 0, in_row1);
65 if (count - in_row1 > 0)
66 memcpy(buf + in_row1, &data->data[16], count - in_row1);
67 diff -Nru a/fs/jbd/transaction.c b/fs/jbd/transaction.c
68 --- a/fs/jbd/transaction.c 2005-04-07 11:59:01 -07:00
69 +++ b/fs/jbd/transaction.c 2005-04-07 11:59:01 -07:00
70 @@ -1775,10 +1775,10 @@
71 JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
72 ret = __dispose_buffer(jh,
73 journal->j_running_transaction);
74 + journal_put_journal_head(jh);
75 spin_unlock(&journal->j_list_lock);
76 jbd_unlock_bh_state(bh);
77 spin_unlock(&journal->j_state_lock);
78 - journal_put_journal_head(jh);
79 return ret;
80 } else {
81 /* There is no currently-running transaction. So the
82 @@ -1789,10 +1789,10 @@
83 JBUFFER_TRACE(jh, "give to committing trans");
84 ret = __dispose_buffer(jh,
85 journal->j_committing_transaction);
86 + journal_put_journal_head(jh);
87 spin_unlock(&journal->j_list_lock);
88 jbd_unlock_bh_state(bh);
89 spin_unlock(&journal->j_state_lock);
90 - journal_put_journal_head(jh);
91 return ret;
92 } else {
93 /* The orphan record's transaction has
94 @@ -1813,10 +1813,10 @@
95 journal->j_running_transaction);
96 jh->b_next_transaction = NULL;
97 }
98 + journal_put_journal_head(jh);
99 spin_unlock(&journal->j_list_lock);
100 jbd_unlock_bh_state(bh);
101 spin_unlock(&journal->j_state_lock);
102 - journal_put_journal_head(jh);
103 return 0;
104 } else {
105 /* Good, the buffer belongs to the running transaction.
106 diff -Nru a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
107 --- a/lib/rwsem-spinlock.c 2005-04-07 11:59:01 -07:00
108 +++ b/lib/rwsem-spinlock.c 2005-04-07 11:59:01 -07:00
109 @@ -140,12 +140,12 @@
110
111 rwsemtrace(sem, "Entering __down_read");
112
113 - spin_lock(&sem->wait_lock);
114 + spin_lock_irq(&sem->wait_lock);
115
116 if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
117 /* granted */
118 sem->activity++;
119 - spin_unlock(&sem->wait_lock);
120 + spin_unlock_irq(&sem->wait_lock);
121 goto out;
122 }
123
124 @@ -160,7 +160,7 @@
125 list_add_tail(&waiter.list, &sem->wait_list);
126
127 /* we don't need to touch the semaphore struct anymore */
128 - spin_unlock(&sem->wait_lock);
129 + spin_unlock_irq(&sem->wait_lock);
130
131 /* wait to be given the lock */
132 for (;;) {
133 @@ -181,10 +181,12 @@
134 */
135 int fastcall __down_read_trylock(struct rw_semaphore *sem)
136 {
137 + unsigned long flags;
138 int ret = 0;
139 +
140 rwsemtrace(sem, "Entering __down_read_trylock");
141
142 - spin_lock(&sem->wait_lock);
143 + spin_lock_irqsave(&sem->wait_lock, flags);
144
145 if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
146 /* granted */
147 @@ -192,7 +194,7 @@
148 ret = 1;
149 }
150
151 - spin_unlock(&sem->wait_lock);
152 + spin_unlock_irqrestore(&sem->wait_lock, flags);
153
154 rwsemtrace(sem, "Leaving __down_read_trylock");
155 return ret;
156 @@ -209,12 +211,12 @@
157
158 rwsemtrace(sem, "Entering __down_write");
159
160 - spin_lock(&sem->wait_lock);
161 + spin_lock_irq(&sem->wait_lock);
162
163 if (sem->activity == 0 && list_empty(&sem->wait_list)) {
164 /* granted */
165 sem->activity = -1;
166 - spin_unlock(&sem->wait_lock);
167 + spin_unlock_irq(&sem->wait_lock);
168 goto out;
169 }
170
171 @@ -229,7 +231,7 @@
172 list_add_tail(&waiter.list, &sem->wait_list);
173
174 /* we don't need to touch the semaphore struct anymore */
175 - spin_unlock(&sem->wait_lock);
176 + spin_unlock_irq(&sem->wait_lock);
177
178 /* wait to be given the lock */
179 for (;;) {
180 @@ -250,10 +252,12 @@
181 */
182 int fastcall __down_write_trylock(struct rw_semaphore *sem)
183 {
184 + unsigned long flags;
185 int ret = 0;
186 +
187 rwsemtrace(sem, "Entering __down_write_trylock");
188
189 - spin_lock(&sem->wait_lock);
190 + spin_lock_irqsave(&sem->wait_lock, flags);
191
192 if (sem->activity == 0 && list_empty(&sem->wait_list)) {
193 /* granted */
194 @@ -261,7 +265,7 @@
195 ret = 1;
196 }
197
198 - spin_unlock(&sem->wait_lock);
199 + spin_unlock_irqrestore(&sem->wait_lock, flags);
200
201 rwsemtrace(sem, "Leaving __down_write_trylock");
202 return ret;
203 @@ -272,14 +276,16 @@
204 */
205 void fastcall __up_read(struct rw_semaphore *sem)
206 {
207 + unsigned long flags;
208 +
209 rwsemtrace(sem, "Entering __up_read");
210
211 - spin_lock(&sem->wait_lock);
212 + spin_lock_irqsave(&sem->wait_lock, flags);
213
214 if (--sem->activity == 0 && !list_empty(&sem->wait_list))
215 sem = __rwsem_wake_one_writer(sem);
216
217 - spin_unlock(&sem->wait_lock);
218 + spin_unlock_irqrestore(&sem->wait_lock, flags);
219
220 rwsemtrace(sem, "Leaving __up_read");
221 }
222 @@ -289,15 +295,17 @@
223 */
224 void fastcall __up_write(struct rw_semaphore *sem)
225 {
226 + unsigned long flags;
227 +
228 rwsemtrace(sem, "Entering __up_write");
229
230 - spin_lock(&sem->wait_lock);
231 + spin_lock_irqsave(&sem->wait_lock, flags);
232
233 sem->activity = 0;
234 if (!list_empty(&sem->wait_list))
235 sem = __rwsem_do_wake(sem, 1);
236
237 - spin_unlock(&sem->wait_lock);
238 + spin_unlock_irqrestore(&sem->wait_lock, flags);
239
240 rwsemtrace(sem, "Leaving __up_write");
241 }
242 @@ -308,15 +316,17 @@
243 */
244 void fastcall __downgrade_write(struct rw_semaphore *sem)
245 {
246 + unsigned long flags;
247 +
248 rwsemtrace(sem, "Entering __downgrade_write");
249
250 - spin_lock(&sem->wait_lock);
251 + spin_lock_irqsave(&sem->wait_lock, flags);
252
253 sem->activity = 1;
254 if (!list_empty(&sem->wait_list))
255 sem = __rwsem_do_wake(sem, 0);
256
257 - spin_unlock(&sem->wait_lock);
258 + spin_unlock_irqrestore(&sem->wait_lock, flags);
259
260 rwsemtrace(sem, "Leaving __downgrade_write");
261 }
262 diff -Nru a/lib/rwsem.c b/lib/rwsem.c
263 --- a/lib/rwsem.c 2005-04-07 11:59:01 -07:00
264 +++ b/lib/rwsem.c 2005-04-07 11:59:01 -07:00
265 @@ -150,7 +150,7 @@
266 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
267
268 /* set up my own style of waitqueue */
269 - spin_lock(&sem->wait_lock);
270 + spin_lock_irq(&sem->wait_lock);
271 waiter->task = tsk;
272 get_task_struct(tsk);
273
274 @@ -163,7 +163,7 @@
275 if (!(count & RWSEM_ACTIVE_MASK))
276 sem = __rwsem_do_wake(sem, 0);
277
278 - spin_unlock(&sem->wait_lock);
279 + spin_unlock_irq(&sem->wait_lock);
280
281 /* wait to be given the lock */
282 for (;;) {
283 @@ -219,15 +219,17 @@
284 */
285 struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
286 {
287 + unsigned long flags;
288 +
289 rwsemtrace(sem, "Entering rwsem_wake");
290
291 - spin_lock(&sem->wait_lock);
292 + spin_lock_irqsave(&sem->wait_lock, flags);
293
294 /* do nothing if list empty */
295 if (!list_empty(&sem->wait_list))
296 sem = __rwsem_do_wake(sem, 0);
297
298 - spin_unlock(&sem->wait_lock);
299 + spin_unlock_irqrestore(&sem->wait_lock, flags);
300
301 rwsemtrace(sem, "Leaving rwsem_wake");
302
303 @@ -241,15 +243,17 @@
304 */
305 struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
306 {
307 + unsigned long flags;
308 +
309 rwsemtrace(sem, "Entering rwsem_downgrade_wake");
310
311 - spin_lock(&sem->wait_lock);
312 + spin_lock_irqsave(&sem->wait_lock, flags);
313
314 /* do nothing if list empty */
315 if (!list_empty(&sem->wait_list))
316 sem = __rwsem_do_wake(sem, 1);
317
318 - spin_unlock(&sem->wait_lock);
319 + spin_unlock_irqrestore(&sem->wait_lock, flags);
320
321 rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
322 return sem;
323 diff -Nru a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
324 --- a/net/ipv4/tcp_input.c 2005-04-07 11:59:01 -07:00
325 +++ b/net/ipv4/tcp_input.c 2005-04-07 11:59:01 -07:00
326 @@ -1653,7 +1653,10 @@
327 static void tcp_undo_cwr(struct tcp_sock *tp, int undo)
328 {
329 if (tp->prior_ssthresh) {
330 - tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1);
331 + if (tcp_is_bic(tp))
332 + tp->snd_cwnd = max(tp->snd_cwnd, tp->bictcp.last_max_cwnd);
333 + else
334 + tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1);
335
336 if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
337 tp->snd_ssthresh = tp->prior_ssthresh;
338 diff -Nru a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
339 --- a/net/ipv4/xfrm4_output.c 2005-04-07 11:59:01 -07:00
340 +++ b/net/ipv4/xfrm4_output.c 2005-04-07 11:59:01 -07:00
341 @@ -103,16 +103,16 @@
342 goto error_nolock;
343 }
344
345 - spin_lock_bh(&x->lock);
346 - err = xfrm_state_check(x, skb);
347 - if (err)
348 - goto error;
349 -
350 if (x->props.mode) {
351 err = xfrm4_tunnel_check_size(skb);
352 if (err)
353 - goto error;
354 + goto error_nolock;
355 }
356 +
357 + spin_lock_bh(&x->lock);
358 + err = xfrm_state_check(x, skb);
359 + if (err)
360 + goto error;
361
362 xfrm4_encap(skb);
363
364 diff -Nru a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
365 --- a/net/ipv6/xfrm6_output.c 2005-04-07 11:59:01 -07:00
366 +++ b/net/ipv6/xfrm6_output.c 2005-04-07 11:59:01 -07:00
367 @@ -103,16 +103,16 @@
368 goto error_nolock;
369 }
370
371 - spin_lock_bh(&x->lock);
372 - err = xfrm_state_check(x, skb);
373 - if (err)
374 - goto error;
375 -
376 if (x->props.mode) {
377 err = xfrm6_tunnel_check_size(skb);
378 if (err)
379 - goto error;
380 + goto error_nolock;
381 }
382 +
383 + spin_lock_bh(&x->lock);
384 + err = xfrm_state_check(x, skb);
385 + if (err)
386 + goto error;
387
388 xfrm6_encap(skb);
389
390 diff -Nru a/sound/core/timer.c b/sound/core/timer.c
391 --- a/sound/core/timer.c 2005-04-07 11:59:01 -07:00
392 +++ b/sound/core/timer.c 2005-04-07 11:59:01 -07:00
393 @@ -1117,7 +1117,8 @@
394 if (tu->qused >= tu->queue_size) {
395 tu->overrun++;
396 } else {
397 - memcpy(&tu->queue[tu->qtail++], tread, sizeof(*tread));
398 + memcpy(&tu->tqueue[tu->qtail++], tread, sizeof(*tread));
399 + tu->qtail %= tu->queue_size;
400 tu->qused++;
401 }
402 }
403 @@ -1140,6 +1141,8 @@
404 spin_lock(&tu->qlock);
405 snd_timer_user_append_to_tqueue(tu, &r1);
406 spin_unlock(&tu->qlock);
407 + kill_fasync(&tu->fasync, SIGIO, POLL_IN);
408 + wake_up(&tu->qchange_sleep);
409 }
410
411 static void snd_timer_user_tinterrupt(snd_timer_instance_t *timeri,

  ViewVC Help
Powered by ViewVC 1.1.20