Gentoo linux-patches: contents of /genpatches-2.6/historical/2.6.2/420_adaptec_dpt_i2o.patch


Revision 2
Sat Jun 11 23:16:54 2005 UTC by dsd
File MIME type: text/x-diff
File size: 146858 byte(s)
Log message: Import historical releases
1 diff -ruN linux-2.6.1/Documentation/scsi/dpti.txt linux-2.6.1-adaptec/Documentation/scsi/dpti.txt
2 --- linux-2.6.1/Documentation/scsi/dpti.txt 2004-01-09 01:59:48.000000000 -0500
3 +++ linux-2.6.1-adaptec/Documentation/scsi/dpti.txt 2004-01-27 14:10:46.819597608 -0500
4 @@ -1,4 +1,4 @@
5 - /* TERMS AND CONDITIONS OF USE
6 +/* TERMS AND CONDITIONS OF USE
7 *
8 * Redistribution and use in source form, with or without modification, are
9 * permitted provided that redistributions of source code must retain the
10 @@ -56,8 +56,13 @@
11 * FW without having to reboot)
12 * Changed proc output
13 *
14 + * V2.5
15 + * Changes:
16 + * Added 64 bit Scatter Gather when compiled on big memory aaccess
17 + * architectures.
18 + * Altered driver in support of 2.5+ kernels
19 + *
20 * TODO:
21 - * Add 64 bit Scatter Gather when compiled on 64 bit architectures
22 * Add sparse lun scanning
23 * Add code that checks if a device that had been taken offline is
24 * now online (at the FW level) when test unit ready or inquiry
25 @@ -75,9 +80,25 @@
26 * to the board.
27 *
28 * The files dpti_ioctl.h dptsig.h osd_defs.h osd_util.h sys_info.h are part of the
29 - * interface files for Adaptec's management routines. These define the structures used
30 + * interface files for Adaptecs managment routines. These define the structures used
31 * in the ioctls. They are written to be portable. They are hard to read, but I need
32 * to use them 'as is' or I can miss changes in the interface.
33 *
34 + * Cards supported:
35 + * PM2554
36 + * PM2654
37 + * PM2564
38 + * PM3754
39 + * PM3755
40 + * PM3757
41 + * 3200S
42 + * 3400S
43 + * 3210S
44 + * 3410S
45 + * 2000S
46 + * 2005S
47 + * 2015S
48 + * 2020S
49 + * 2400A
50 */
51
52 diff -ruN linux-2.6.1/drivers/scsi/Kconfig linux-2.6.1-adaptec/drivers/scsi/Kconfig
53 --- linux-2.6.1/drivers/scsi/Kconfig 2004-01-09 02:00:02.000000000 -0500
54 +++ linux-2.6.1-adaptec/drivers/scsi/Kconfig 2004-01-27 14:10:10.800073408 -0500
55 @@ -344,7 +344,7 @@
56 # All the I2O code and drivers do not seem to be 64bit safe.
57 config SCSI_DPT_I2O
58 tristate "Adaptec I2O RAID support "
59 - depends on !64BIT && SCSI && BROKEN
60 + depends on SCSI
61 help
62 This driver supports all of Adaptec's I2O based RAID controllers as
63 well as the DPT SmartRaid V cards. This is an Adaptec maintained
64 diff -ruN linux-2.6.1/drivers/scsi/dpt/dpt_osdutil.h linux-2.6.1-adaptec/drivers/scsi/dpt/dpt_osdutil.h
65 --- linux-2.6.1/drivers/scsi/dpt/dpt_osdutil.h 2004-01-09 02:00:04.000000000 -0500
66 +++ linux-2.6.1-adaptec/drivers/scsi/dpt/dpt_osdutil.h 1969-12-31 19:00:00.000000000 -0500
67 @@ -1,358 +0,0 @@
68 -/* BSDI osd_util.h,v 1.8 1998/06/03 19:14:58 karels Exp */
69 -
70 -/*
71 - * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
72 - * All rights reserved.
73 - *
74 - * Redistribution and use in source form, with or without modification, are
75 - * permitted provided that redistributions of source code must retain the
76 - * above copyright notice, this list of conditions and the following disclaimer.
77 - *
78 - * This software is provided `as is' by Distributed Processing Technology and
79 - * any express or implied warranties, including, but not limited to, the
80 - * implied warranties of merchantability and fitness for a particular purpose,
81 - * are disclaimed. In no event shall Distributed Processing Technology be
82 - * liable for any direct, indirect, incidental, special, exemplary or
83 - * consequential damages (including, but not limited to, procurement of
84 - * substitute goods or services; loss of use, data, or profits; or business
85 - * interruptions) however caused and on any theory of liability, whether in
86 - * contract, strict liability, or tort (including negligence or otherwise)
87 - * arising in any way out of the use of this driver software, even if advised
88 - * of the possibility of such damage.
89 - *
90 - */
91 -
92 -#ifndef __OSD_UTIL_H
93 -#define __OSD_UTIL_H
94 -
95 -/*File - OSD_UTIL.H
96 - ****************************************************************************
97 - *
98 - *Description:
99 - *
100 - * This file contains defines and function prototypes that are
101 - *operating system dependent. The resources defined in this file
102 - *are not specific to any particular application.
103 - *
104 - *Copyright Distributed Processing Technology, Corp.
105 - * 140 Candace Dr.
106 - * Maitland, Fl. 32751 USA
107 - * Phone: (407) 830-5522 Fax: (407) 260-5366
108 - * All Rights Reserved
109 - *
110 - *Author: Doug Anderson
111 - *Date: 1/7/94
112 - *
113 - *Editors:
114 - *
115 - *Remarks:
116 - *
117 - *
118 - *****************************************************************************/
119 -
120 -
121 -/*Definitions - Defines & Constants ----------------------------------------- */
122 -
123 -/*----------------------------- */
124 -/* Operating system selections: */
125 -/*----------------------------- */
126 -
127 -/*#define _DPT_MSDOS */
128 -/*#define _DPT_WIN_3X */
129 -/*#define _DPT_WIN_4X */
130 -/*#define _DPT_WIN_NT */
131 -/*#define _DPT_NETWARE */
132 -/*#define _DPT_OS2 */
133 -/*#define _DPT_SCO */
134 -/*#define _DPT_UNIXWARE */
135 -/*#define _DPT_SOLARIS */
136 -/*#define _DPT_NEXTSTEP */
137 -/*#define _DPT_BANYAN */
138 -
139 -/*-------------------------------- */
140 -/* Include the OS specific defines */
141 -/*-------------------------------- */
142 -
143 -/*#define OS_SELECTION From Above List */
144 -/*#define SEMAPHORE_T ??? */
145 -/*#define DLL_HANDLE_T ??? */
146 -
147 -#if (defined(KERNEL) && (defined(__FreeBSD__) || defined(__bsdi__)))
148 -# include "i386/isa/dpt_osd_defs.h"
149 -#else
150 -# include "osd_defs.h"
151 -#endif
152 -
153 -#ifndef DPT_UNALIGNED
154 - #define DPT_UNALIGNED
155 -#endif
156 -
157 -#ifndef DPT_EXPORT
158 - #define DPT_EXPORT
159 -#endif
160 -
161 -#ifndef DPT_IMPORT
162 - #define DPT_IMPORT
163 -#endif
164 -
165 -#ifndef DPT_RUNTIME_IMPORT
166 - #define DPT_RUNTIME_IMPORT DPT_IMPORT
167 -#endif
168 -
169 -/*--------------------- */
170 -/* OS dependent defines */
171 -/*--------------------- */
172 -
173 -#if defined (_DPT_MSDOS) || defined (_DPT_WIN_3X)
174 - #define _DPT_16_BIT
175 -#else
176 - #define _DPT_32_BIT
177 -#endif
178 -
179 -#if defined (_DPT_SCO) || defined (_DPT_UNIXWARE) || defined (_DPT_SOLARIS) || defined (_DPT_AIX) || defined (SNI_MIPS) || defined (_DPT_BSDI) || defined (_DPT_FREE_BSD) || defined(_DPT_LINUX)
180 - #define _DPT_UNIX
181 -#endif
182 -
183 -#if defined (_DPT_WIN_3x) || defined (_DPT_WIN_4X) || defined (_DPT_WIN_NT) \
184 - || defined (_DPT_OS2)
185 - #define _DPT_DLL_SUPPORT
186 -#endif
187 -
188 -#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X) && !defined (_DPT_NETWARE)
189 - #define _DPT_PREEMPTIVE
190 -#endif
191 -
192 -#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X)
193 - #define _DPT_MULTI_THREADED
194 -#endif
195 -
196 -#if !defined (_DPT_MSDOS)
197 - #define _DPT_MULTI_TASKING
198 -#endif
199 -
200 - /* These exist for platforms that */
201 - /* chunk when accessing mis-aligned */
202 - /* data */
203 -#if defined (SNI_MIPS) || defined (_DPT_SOLARIS)
204 - #if defined (_DPT_BIG_ENDIAN)
205 - #if !defined (_DPT_STRICT_ALIGN)
206 - #define _DPT_STRICT_ALIGN
207 - #endif
208 - #endif
209 -#endif
210 -
211 - /* Determine if in C or C++ mode */
212 -#ifdef __cplusplus
213 - #define _DPT_CPP
214 -#else
215 - #define _DPT_C
216 -#endif
217 -
218 -/*-------------------------------------------------------------------*/
219 -/* Under Solaris the compiler refuses to accept code like: */
220 -/* { {"DPT"}, 0, NULL .... }, */
221 -/* and complains about the {"DPT"} part by saying "cannot use { } */
222 -/* to initialize char*". */
223 -/* */
224 -/* By defining these ugly macros we can get around this and also */
225 -/* not have to copy and #ifdef large sections of code. I know that */
226 -/* these macros are *really* ugly, but they should help reduce */
227 -/* maintenance in the long run. */
228 -/* */
229 -/*-------------------------------------------------------------------*/
230 -#if !defined (DPTSQO)
231 - #if defined (_DPT_SOLARIS)
232 - #define DPTSQO
233 - #define DPTSQC
234 - #else
235 - #define DPTSQO {
236 - #define DPTSQC }
237 - #endif /* solaris */
238 -#endif /* DPTSQO */
239 -
240 -
241 -/*---------------------- */
242 -/* OS dependent typedefs */
243 -/*---------------------- */
244 -
245 -#if defined (_DPT_MSDOS) || defined (_DPT_SCO)
246 - #define BYTE unsigned char
247 - #define WORD unsigned short
248 -#endif
249 -
250 -#ifndef _DPT_TYPEDEFS
251 - #define _DPT_TYPEDEFS
252 - typedef unsigned char uCHAR;
253 - typedef unsigned short uSHORT;
254 - typedef unsigned int uINT;
255 - typedef unsigned long uLONG;
256 -
257 - typedef union {
258 - uCHAR u8[4];
259 - uSHORT u16[2];
260 - uLONG u32;
261 - } access_U;
262 -#endif
263 -
264 -#if !defined (NULL)
265 - #define NULL 0
266 -#endif
267 -
268 -
269 -/*Prototypes - function ----------------------------------------------------- */
270 -
271 -#ifdef __cplusplus
272 - extern "C" { /* Declare all these functions as "C" functions */
273 -#endif
274 -
275 -/*------------------------ */
276 -/* Byte reversal functions */
277 -/*------------------------ */
278 -
279 - /* Reverses the byte ordering of a 2 byte variable */
280 -#if (!defined(osdSwap2))
281 - uSHORT osdSwap2(DPT_UNALIGNED uSHORT *);
282 -#endif // !osdSwap2
283 -
284 - /* Reverses the byte ordering of a 4 byte variable and shifts left 8 bits */
285 -#if (!defined(osdSwap3))
286 - uLONG osdSwap3(DPT_UNALIGNED uLONG *);
287 -#endif // !osdSwap3
288 -
289 -
290 -#ifdef _DPT_NETWARE
291 - #include "novpass.h" /* For DPT_Bswapl() prototype */
292 - /* Inline the byte swap */
293 - #ifdef __cplusplus
294 - inline uLONG osdSwap4(uLONG *inLong) {
295 - return *inLong = DPT_Bswapl(*inLong);
296 - }
297 - #else
298 - #define osdSwap4(inLong) DPT_Bswapl(inLong)
299 - #endif // cplusplus
300 -#else
301 - /* Reverses the byte ordering of a 4 byte variable */
302 -# if (!defined(osdSwap4))
303 - uLONG osdSwap4(DPT_UNALIGNED uLONG *);
304 -# endif // !osdSwap4
305 -
306 - /* The following functions ALWAYS swap regardless of the *
307 - * presence of DPT_BIG_ENDIAN */
308 -
309 - uSHORT trueSwap2(DPT_UNALIGNED uSHORT *);
310 - uLONG trueSwap4(DPT_UNALIGNED uLONG *);
311 -
312 -#endif // netware
313 -
314 -
315 -/*-------------------------------------*
316 - * Network order swap functions *
317 - * *
318 - * These functions/macros will be used *
319 - * by the structure insert()/extract() *
320 - * functions. *
321 - *
322 - * We will enclose all structure *
323 - * portability modifications inside *
324 - * #ifdefs. When we are ready, we *
325 - * will #define DPT_PORTABLE to begin *
326 - * using the modifications. *
327 - *-------------------------------------*/
328 -uLONG netSwap4(uLONG val);
329 -
330 -#if defined (_DPT_BIG_ENDIAN)
331 -
332 -// for big-endian we need to swap
333 -
334 -#ifndef NET_SWAP_2
335 -#define NET_SWAP_2(x) (((x) >> 8) | ((x) << 8))
336 -#endif // NET_SWAP_2
337 -
338 -#ifndef NET_SWAP_4
339 -#define NET_SWAP_4(x) netSwap4((x))
340 -#endif // NET_SWAP_4
341 -
342 -#else
343 -
344 -// for little-endian we don't need to do anything
345 -
346 -#ifndef NET_SWAP_2
347 -#define NET_SWAP_2(x) (x)
348 -#endif // NET_SWAP_2
349 -
350 -#ifndef NET_SWAP_4
351 -#define NET_SWAP_4(x) (x)
352 -#endif // NET_SWAP_4
353 -
354 -#endif // big endian
355 -
356 -
357 -
358 -/*----------------------------------- */
359 -/* Run-time loadable module functions */
360 -/*----------------------------------- */
361 -
362 - /* Loads the specified run-time loadable DLL */
363 -DLL_HANDLE_T osdLoadModule(uCHAR *);
364 - /* Unloads the specified run-time loadable DLL */
365 -uSHORT osdUnloadModule(DLL_HANDLE_T);
366 - /* Returns a pointer to a function inside a run-time loadable DLL */
367 -void * osdGetFnAddr(DLL_HANDLE_T,uCHAR *);
368 -
369 -/*--------------------------------------- */
370 -/* Mutually exclusive semaphore functions */
371 -/*--------------------------------------- */
372 -
373 - /* Create a named semaphore */
374 -SEMAPHORE_T osdCreateNamedSemaphore(char *);
375 - /* Create a mutually exlusive semaphore */
376 -SEMAPHORE_T osdCreateSemaphore(void);
377 - /* create an event semaphore */
378 -SEMAPHORE_T osdCreateEventSemaphore(void);
379 - /* create a named event semaphore */
380 -SEMAPHORE_T osdCreateNamedEventSemaphore(char *);
381 -
382 - /* Destroy the specified mutually exclusive semaphore object */
383 -uSHORT osdDestroySemaphore(SEMAPHORE_T);
384 - /* Request access to the specified mutually exclusive semaphore */
385 -uLONG osdRequestSemaphore(SEMAPHORE_T,uLONG);
386 - /* Release access to the specified mutually exclusive semaphore */
387 -uSHORT osdReleaseSemaphore(SEMAPHORE_T);
388 - /* wait for a event to happen */
389 -uLONG osdWaitForEventSemaphore(SEMAPHORE_T, uLONG);
390 - /* signal an event */
391 -uLONG osdSignalEventSemaphore(SEMAPHORE_T);
392 - /* reset the event */
393 -uLONG osdResetEventSemaphore(SEMAPHORE_T);
394 -
395 -/*----------------- */
396 -/* Thread functions */
397 -/*----------------- */
398 -
399 - /* Releases control to the task switcher in non-preemptive */
400 - /* multitasking operating systems. */
401 -void osdSwitchThreads(void);
402 -
403 - /* Starts a thread function */
404 -uLONG osdStartThread(void *,void *);
405 -
406 -/* what is my thread id */
407 -uLONG osdGetThreadID(void);
408 -
409 -/* wakes up the specifed thread */
410 -void osdWakeThread(uLONG);
411 -
412 -/* osd sleep for x miliseconds */
413 -void osdSleep(uLONG);
414 -
415 -#define DPT_THREAD_PRIORITY_LOWEST 0x00
416 -#define DPT_THREAD_PRIORITY_NORMAL 0x01
417 -#define DPT_THREAD_PRIORITY_HIGHEST 0x02
418 -
419 -uCHAR osdSetThreadPriority(uLONG tid, uCHAR priority);
420 -
421 -#ifdef __cplusplus
422 - } /* end the xtern "C" declaration */
423 -#endif
424 -
425 -#endif /* osd_util_h */
426 diff -ruN linux-2.6.1/drivers/scsi/dpt/dpti_i2o-dev.h linux-2.6.1-adaptec/drivers/scsi/dpt/dpti_i2o-dev.h
427 --- linux-2.6.1/drivers/scsi/dpt/dpti_i2o-dev.h 1969-12-31 19:00:00.000000000 -0500
428 +++ linux-2.6.1-adaptec/drivers/scsi/dpt/dpti_i2o-dev.h 2004-01-27 14:08:37.104317328 -0500
429 @@ -0,0 +1,395 @@
430 +/*
431 + * I2O user space accessible structures/APIs
432 + *
433 + * (c) Copyright 1999, 2000 Red Hat Software
434 + *
435 + * This program is free software; you can redistribute it and/or
436 + * modify it under the terms of the GNU General Public License
437 + * as published by the Free Software Foundation; either version
438 + * 2 of the License, or (at your option) any later version.
439 + *
440 + *************************************************************************
441 + *
442 + * This header file defines the I2O APIs that are available to both
443 + * the kernel and user level applications. Kernel specific structures
444 + * are defined in i2o_osm. OSMs should include _only_ i2o_osm.h which
445 + * automatically includs this file.
446 + *
447 + */
448 +
449 +#ifndef _I2O_DEV_H
450 +#define _I2O_DEV_H
451 +
452 +
453 +#include <linux/ioctl.h>
454 +
455 +/*
456 + * I2O Control IOCTLs and structures
457 + */
458 +#define I2O_MAGIC_NUMBER 'i'
459 +#define I2OGETIOPS _IOR(I2O_MAGIC_NUMBER,0,u8[MAX_I2O_CONTROLLERS])
460 +#define I2OHRTGET _IOWR(I2O_MAGIC_NUMBER,1,struct i2o_cmd_hrtlct)
461 +#define I2OLCTGET _IOWR(I2O_MAGIC_NUMBER,2,struct i2o_cmd_hrtlct)
462 +#define I2OPARMSET _IOWR(I2O_MAGIC_NUMBER,3,struct i2o_cmd_psetget)
463 +#define I2OPARMGET _IOWR(I2O_MAGIC_NUMBER,4,struct i2o_cmd_psetget)
464 +#define I2OSWDL _IOWR(I2O_MAGIC_NUMBER,5,struct i2o_sw_xfer)
465 +#define I2OSWUL _IOWR(I2O_MAGIC_NUMBER,6,struct i2o_sw_xfer)
466 +#define I2OSWDEL _IOWR(I2O_MAGIC_NUMBER,7,struct i2o_sw_xfer)
467 +#define I2OVALIDATE _IOR(I2O_MAGIC_NUMBER,8,u32)
468 +#define I2OHTML _IOWR(I2O_MAGIC_NUMBER,9,struct i2o_html)
469 +#define I2OEVTREG _IOW(I2O_MAGIC_NUMBER,10,struct i2o_evt_id)
470 +#define I2OEVTGET _IOR(I2O_MAGIC_NUMBER,11,struct i2o_evt_info)
471 +
472 +struct i2o_cmd_hrtlct
473 +{
474 + unsigned int iop; /* IOP unit number */
475 + void *resbuf; /* Buffer for result */
476 + unsigned int *reslen; /* Buffer length in bytes */
477 +};
478 +
479 +struct i2o_cmd_psetget
480 +{
481 + unsigned int iop; /* IOP unit number */
482 + unsigned int tid; /* Target device TID */
483 + void *opbuf; /* Operation List buffer */
484 + unsigned int oplen; /* Operation List buffer length in bytes */
485 + void *resbuf; /* Result List buffer */
486 + unsigned int *reslen; /* Result List buffer length in bytes */
487 +};
488 +
489 +struct i2o_sw_xfer
490 +{
491 + unsigned int iop; /* IOP unit number */
492 + unsigned char flags; /* Flags field */
493 + unsigned char sw_type; /* Software type */
494 + unsigned int sw_id; /* Software ID */
495 + void *buf; /* Pointer to software buffer */
496 + unsigned int *swlen; /* Length of software data */
497 + unsigned int *maxfrag; /* Maximum fragment count */
498 + unsigned int *curfrag; /* Current fragment count */
499 +};
500 +
501 +struct i2o_html
502 +{
503 + unsigned int iop; /* IOP unit number */
504 + unsigned int tid; /* Target device ID */
505 + unsigned int page; /* HTML page */
506 + void *resbuf; /* Buffer for reply HTML page */
507 + unsigned int *reslen; /* Length in bytes of reply buffer */
508 + void *qbuf; /* Pointer to HTTP query string */
509 + unsigned int qlen; /* Length in bytes of query string buffer */
510 +};
511 +
512 +#define I2O_EVT_Q_LEN 32
513 +
514 +struct i2o_evt_id
515 +{
516 + unsigned int iop;
517 + unsigned int tid;
518 + unsigned int evt_mask;
519 +};
520 +
521 +/* Event data size = frame size - message header + evt indicator */
522 +#define I2O_EVT_DATA_SIZE 88
523 +
524 +struct i2o_evt_info
525 +{
526 + struct i2o_evt_id id;
527 + unsigned char evt_data[I2O_EVT_DATA_SIZE];
528 + unsigned int data_size;
529 +};
530 +
531 +struct i2o_evt_get
532 +{
533 + struct i2o_evt_info info;
534 + int pending;
535 + int lost;
536 +};
537 +
538 +
539 +/**************************************************************************
540 + * HRT related constants and structures
541 + **************************************************************************/
542 +#define I2O_BUS_LOCAL 0
543 +#define I2O_BUS_ISA 1
544 +#define I2O_BUS_EISA 2
545 +#define I2O_BUS_MCA 3
546 +#define I2O_BUS_PCI 4
547 +#define I2O_BUS_PCMCIA 5
548 +#define I2O_BUS_NUBUS 6
549 +#define I2O_BUS_CARDBUS 7
550 +#define I2O_BUS_UNKNOWN 0x80
551 +
552 +#ifndef __KERNEL__
553 +
554 +typedef unsigned char u8;
555 +typedef unsigned short u16;
556 +typedef unsigned int u32;
557 +
558 +#endif /* __KERNEL__ */
559 +
560 +typedef struct _i2o_pci_bus {
561 + u8 PciFunctionNumber;
562 + u8 PciDeviceNumber;
563 + u8 PciBusNumber;
564 + u8 reserved;
565 + u16 PciVendorID;
566 + u16 PciDeviceID;
567 +} i2o_pci_bus;
568 +
569 +typedef struct _i2o_local_bus {
570 + u16 LbBaseIOPort;
571 + u16 reserved;
572 + u32 LbBaseMemoryAddress;
573 +} i2o_local_bus;
574 +
575 +typedef struct _i2o_isa_bus {
576 + u16 IsaBaseIOPort;
577 + u8 CSN;
578 + u8 reserved;
579 + u32 IsaBaseMemoryAddress;
580 +} i2o_isa_bus;
581 +
582 +typedef struct _i2o_eisa_bus_info {
583 + u16 EisaBaseIOPort;
584 + u8 reserved;
585 + u8 EisaSlotNumber;
586 + u32 EisaBaseMemoryAddress;
587 +} i2o_eisa_bus;
588 +
589 +typedef struct _i2o_mca_bus {
590 + u16 McaBaseIOPort;
591 + u8 reserved;
592 + u8 McaSlotNumber;
593 + u32 McaBaseMemoryAddress;
594 +} i2o_mca_bus;
595 +
596 +typedef struct _i2o_other_bus {
597 + u16 BaseIOPort;
598 + u16 reserved;
599 + u32 BaseMemoryAddress;
600 +} i2o_other_bus;
601 +
602 +typedef struct _i2o_hrt_entry {
603 + u32 adapter_id;
604 + u32 parent_tid:12;
605 + u32 state:4;
606 + u32 bus_num:8;
607 + u32 bus_type:8;
608 + union {
609 + i2o_pci_bus pci_bus;
610 + i2o_local_bus local_bus;
611 + i2o_isa_bus isa_bus;
612 + i2o_eisa_bus eisa_bus;
613 + i2o_mca_bus mca_bus;
614 + i2o_other_bus other_bus;
615 + } bus;
616 +} i2o_hrt_entry;
617 +
618 +typedef struct _i2o_hrt {
619 + u16 num_entries;
620 + u8 entry_len;
621 + u8 hrt_version;
622 + u32 change_ind;
623 + i2o_hrt_entry hrt_entry[1];
624 +} i2o_hrt;
625 +
626 +typedef struct _i2o_lct_entry {
627 + u32 entry_size:16;
628 + u32 tid:12;
629 + u32 reserved:4;
630 + u32 change_ind;
631 + u32 device_flags;
632 + u32 class_id:12;
633 + u32 version:4;
634 + u32 vendor_id:16;
635 + u32 sub_class;
636 + u32 user_tid:12;
637 + u32 parent_tid:12;
638 + u32 bios_info:8;
639 + u8 identity_tag[8];
640 + u32 event_capabilities;
641 +} i2o_lct_entry;
642 +
643 +typedef struct _i2o_lct {
644 + u32 table_size:16;
645 + u32 boot_tid:12;
646 + u32 lct_ver:4;
647 + u32 iop_flags;
648 + u32 change_ind;
649 + i2o_lct_entry lct_entry[1];
650 +} i2o_lct;
651 +
652 +typedef struct _i2o_status_block {
653 + u16 org_id;
654 + u16 reserved;
655 + u16 iop_id:12;
656 + u16 reserved1:4;
657 + u16 host_unit_id;
658 + u16 segment_number:12;
659 + u16 i2o_version:4;
660 + u8 iop_state;
661 + u8 msg_type;
662 + u16 inbound_frame_size;
663 + u8 init_code;
664 + u8 reserved2;
665 + u32 max_inbound_frames;
666 + u32 cur_inbound_frames;
667 + u32 max_outbound_frames;
668 + char product_id[24];
669 + u32 expected_lct_size;
670 + u32 iop_capabilities;
671 + u32 desired_mem_size;
672 + u32 current_mem_size;
673 + u32 current_mem_base;
674 + u32 desired_io_size;
675 + u32 current_io_size;
676 + u32 current_io_base;
677 + u32 reserved3:24;
678 + u32 cmd_status:8;
679 +} i2o_status_block;
680 +
681 +/* Event indicator mask flags */
682 +#define I2O_EVT_IND_STATE_CHANGE 0x80000000
683 +#define I2O_EVT_IND_GENERAL_WARNING 0x40000000
684 +#define I2O_EVT_IND_CONFIGURATION_FLAG 0x20000000
685 +#define I2O_EVT_IND_LOCK_RELEASE 0x10000000
686 +#define I2O_EVT_IND_CAPABILITY_CHANGE 0x08000000
687 +#define I2O_EVT_IND_DEVICE_RESET 0x04000000
688 +#define I2O_EVT_IND_EVT_MASK_MODIFIED 0x02000000
689 +#define I2O_EVT_IND_FIELD_MODIFIED 0x01000000
690 +#define I2O_EVT_IND_VENDOR_EVT 0x00800000
691 +#define I2O_EVT_IND_DEVICE_STATE 0x00400000
692 +
693 +/* Executive event indicitors */
694 +#define I2O_EVT_IND_EXEC_RESOURCE_LIMITS 0x00000001
695 +#define I2O_EVT_IND_EXEC_CONNECTION_FAIL 0x00000002
696 +#define I2O_EVT_IND_EXEC_ADAPTER_FAULT 0x00000004
697 +#define I2O_EVT_IND_EXEC_POWER_FAIL 0x00000008
698 +#define I2O_EVT_IND_EXEC_RESET_PENDING 0x00000010
699 +#define I2O_EVT_IND_EXEC_RESET_IMMINENT 0x00000020
700 +#define I2O_EVT_IND_EXEC_HW_FAIL 0x00000040
701 +#define I2O_EVT_IND_EXEC_XCT_CHANGE 0x00000080
702 +#define I2O_EVT_IND_EXEC_NEW_LCT_ENTRY 0x00000100
703 +#define I2O_EVT_IND_EXEC_MODIFIED_LCT 0x00000200
704 +#define I2O_EVT_IND_EXEC_DDM_AVAILABILITY 0x00000400
705 +
706 +/* Random Block Storage Event Indicators */
707 +#define I2O_EVT_IND_BSA_VOLUME_LOAD 0x00000001
708 +#define I2O_EVT_IND_BSA_VOLUME_UNLOAD 0x00000002
709 +#define I2O_EVT_IND_BSA_VOLUME_UNLOAD_REQ 0x00000004
710 +#define I2O_EVT_IND_BSA_CAPACITY_CHANGE 0x00000008
711 +#define I2O_EVT_IND_BSA_SCSI_SMART 0x00000010
712 +
713 +/* Event data for generic events */
714 +#define I2O_EVT_STATE_CHANGE_NORMAL 0x00
715 +#define I2O_EVT_STATE_CHANGE_SUSPENDED 0x01
716 +#define I2O_EVT_STATE_CHANGE_RESTART 0x02
717 +#define I2O_EVT_STATE_CHANGE_NA_RECOVER 0x03
718 +#define I2O_EVT_STATE_CHANGE_NA_NO_RECOVER 0x04
719 +#define I2O_EVT_STATE_CHANGE_QUIESCE_REQUEST 0x05
720 +#define I2O_EVT_STATE_CHANGE_FAILED 0x10
721 +#define I2O_EVT_STATE_CHANGE_FAULTED 0x11
722 +
723 +#define I2O_EVT_GEN_WARNING_NORMAL 0x00
724 +#define I2O_EVT_GEN_WARNING_ERROR_THRESHOLD 0x01
725 +#define I2O_EVT_GEN_WARNING_MEDIA_FAULT 0x02
726 +
727 +#define I2O_EVT_CAPABILITY_OTHER 0x01
728 +#define I2O_EVT_CAPABILITY_CHANGED 0x02
729 +
730 +#define I2O_EVT_SENSOR_STATE_CHANGED 0x01
731 +
732 +/*
733 + * I2O classes / subclasses
734 + */
735 +
736 +/* Class ID and Code Assignments
737 + * (LCT.ClassID.Version field)
738 + */
739 +#define I2O_CLASS_VERSION_10 0x00
740 +#define I2O_CLASS_VERSION_11 0x01
741 +
742 +/* Class code names
743 + * (from v1.5 Table 6-1 Class Code Assignments.)
744 + */
745 +
746 +#define I2O_CLASS_EXECUTIVE 0x000
747 +#define I2O_CLASS_DDM 0x001
748 +#define I2O_CLASS_RANDOM_BLOCK_STORAGE 0x010
749 +#define I2O_CLASS_SEQUENTIAL_STORAGE 0x011
750 +#define I2O_CLASS_LAN 0x020
751 +#define I2O_CLASS_WAN 0x030
752 +#define I2O_CLASS_FIBRE_CHANNEL_PORT 0x040
753 +#define I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL 0x041
754 +#define I2O_CLASS_SCSI_PERIPHERAL 0x051
755 +#define I2O_CLASS_ATE_PORT 0x060
756 +#define I2O_CLASS_ATE_PERIPHERAL 0x061
757 +#define I2O_CLASS_FLOPPY_CONTROLLER 0x070
758 +#define I2O_CLASS_FLOPPY_DEVICE 0x071
759 +#define I2O_CLASS_BUS_ADAPTER_PORT 0x080
760 +#define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090
761 +#define I2O_CLASS_PEER_TRANSPORT 0x091
762 +
763 +/*
764 + * Rest of 0x092 - 0x09f reserved for peer-to-peer classes
765 + */
766 +
767 +#define I2O_CLASS_MATCH_ANYCLASS 0xffffffff
768 +
769 +/*
770 + * Subclasses
771 + */
772 +
773 +#define I2O_SUBCLASS_i960 0x001
774 +#define I2O_SUBCLASS_HDM 0x020
775 +#define I2O_SUBCLASS_ISM 0x021
776 +
777 +/* Operation functions */
778 +
779 +#define I2O_PARAMS_FIELD_GET 0x0001
780 +#define I2O_PARAMS_LIST_GET 0x0002
781 +#define I2O_PARAMS_MORE_GET 0x0003
782 +#define I2O_PARAMS_SIZE_GET 0x0004
783 +#define I2O_PARAMS_TABLE_GET 0x0005
784 +#define I2O_PARAMS_FIELD_SET 0x0006
785 +#define I2O_PARAMS_LIST_SET 0x0007
786 +#define I2O_PARAMS_ROW_ADD 0x0008
787 +#define I2O_PARAMS_ROW_DELETE 0x0009
788 +#define I2O_PARAMS_TABLE_CLEAR 0x000A
789 +
790 +/*
791 + * I2O serial number conventions / formats
792 + * (circa v1.5)
793 + */
794 +
795 +#define I2O_SNFORMAT_UNKNOWN 0
796 +#define I2O_SNFORMAT_BINARY 1
797 +#define I2O_SNFORMAT_ASCII 2
798 +#define I2O_SNFORMAT_UNICODE 3
799 +#define I2O_SNFORMAT_LAN48_MAC 4
800 +#define I2O_SNFORMAT_WAN 5
801 +
802 +/*
803 + * Plus new in v2.0 (Yellowstone pdf doc)
804 + */
805 +
806 +#define I2O_SNFORMAT_LAN64_MAC 6
807 +#define I2O_SNFORMAT_DDM 7
808 +#define I2O_SNFORMAT_IEEE_REG64 8
809 +#define I2O_SNFORMAT_IEEE_REG128 9
810 +#define I2O_SNFORMAT_UNKNOWN2 0xff
811 +
812 +/*
813 + * I2O Get Status State values
814 + */
815 +
816 +#define ADAPTER_STATE_INITIALIZING 0x01
817 +#define ADAPTER_STATE_RESET 0x02
818 +#define ADAPTER_STATE_HOLD 0x04
819 +#define ADAPTER_STATE_READY 0x05
820 +#define ADAPTER_STATE_OPERATIONAL 0x08
821 +#define ADAPTER_STATE_FAILED 0x10
822 +#define ADAPTER_STATE_FAULTED 0x11
823 +
824 +#endif /* _I2O_DEV_H */
825 diff -ruN linux-2.6.1/drivers/scsi/dpt/dpti_i2o.h linux-2.6.1-adaptec/drivers/scsi/dpt/dpti_i2o.h
826 --- linux-2.6.1/drivers/scsi/dpt/dpti_i2o.h 2004-01-09 01:59:33.000000000 -0500
827 +++ linux-2.6.1-adaptec/drivers/scsi/dpt/dpti_i2o.h 2004-01-27 14:08:37.104317328 -0500
828 @@ -1,6 +1,5 @@
829 -#ifndef _SCSI_I2O_H
830 -#define _SCSI_I2O_H
831 -
832 +#ifndef _I2O_H
833 +#define _I2O_H
834 /* I2O kernel space accessible structures/APIs
835 *
836 * (c) Copyright 1999, 2000 Red Hat Software
837 @@ -19,10 +18,9 @@
838
839 #ifdef __KERNEL__ /* This file to be included by kernel only */
840
841 -#include <linux/i2o-dev.h>
842 +#include "dpti_i2o-dev.h"
843
844 #include <asm/semaphore.h> /* Needed for MUTEX init macros */
845 -#include <linux/version.h>
846 #include <linux/config.h>
847 #include <linux/notifier.h>
848 #include <asm/atomic.h>
849 @@ -44,10 +42,16 @@
850
851 #define I2O_MAX_MANAGERS 4
852
853 +#include <asm/semaphore.h> /* Needed for MUTEX init macros */
854 +
855 /*
856 * I2O Interface Objects
857 */
858
859 +#include <linux/config.h>
860 +#include <linux/notifier.h>
861 +#include <asm/atomic.h>
862 +
863 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
864
865 #define DECLARE_MUTEX(name) struct semaphore name=MUTEX
866 @@ -456,4 +460,4 @@
867
868 #endif /* __KERNEL__ */
869
870 -#endif /* _SCSI_I2O_H */
871 +#endif /* _I2O_H */
872 diff -ruN linux-2.6.1/drivers/scsi/dpt/dpti_ioctl.h linux-2.6.1-adaptec/drivers/scsi/dpt/dpti_ioctl.h
873 --- linux-2.6.1/drivers/scsi/dpt/dpti_ioctl.h 2004-01-09 01:59:27.000000000 -0500
874 +++ linux-2.6.1-adaptec/drivers/scsi/dpt/dpti_ioctl.h 2004-01-27 14:08:37.104317328 -0500
875 @@ -3,10 +3,10 @@
876 -------------------
877 begin : Thu Sep 7 2000
878 copyright : (C) 2001 by Adaptec
879 - email : deanna_bonds@adaptec.com
880 + email : Mark_Salyzyn@adaptec.com
881 + original author : doug_anderson@adaptec.com & deanna_bonds@adaptec.com
882
883 - See Documentation/scsi/dpti.txt for history, notes, license info
884 - and credits
885 + See README.dpti for history, notes, license info, and credits
886 ***************************************************************************/
887
888 /***************************************************************************
889 diff -ruN linux-2.6.1/drivers/scsi/dpt/dptsig.h linux-2.6.1-adaptec/drivers/scsi/dpt/dptsig.h
890 --- linux-2.6.1/drivers/scsi/dpt/dptsig.h 2004-01-09 02:00:02.000000000 -0500
891 +++ linux-2.6.1-adaptec/drivers/scsi/dpt/dptsig.h 2004-01-27 14:08:37.104317328 -0500
892 @@ -1,7 +1,8 @@
893 /* BSDI dptsig.h,v 1.7 1998/06/03 19:15:00 karels Exp */
894
895 /*
896 - * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
897 + * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
898 + * Copyright (c) 2000-2001 Adaptec Corporation.
899 * All rights reserved.
900 *
901 * Redistribution and use in source form, with or without modification, are
902 @@ -92,6 +93,7 @@
903 #define PROC_INTEL 0x00 /* Intel 80x86 */
904 #define PROC_MOTOROLA 0x01 /* Motorola 68K */
905 #define PROC_MIPS4000 0x02 /* MIPS RISC 4000 */
906 +#define PROC_MIPS PROC_MIPS4000 /* MIPS RISC */
907 #define PROC_ALPHA 0x03 /* DEC Alpha */
908 #define PROC_POWERPC 0x04 /* IBM Power PC */
909 #define PROC_i960 0x05 /* Intel i960 */
910 @@ -108,10 +110,14 @@
911 #define PROC_486 0x08 /* Intel 80486 */
912 #define PROC_PENTIUM 0x10 /* Intel 586 aka P5 aka Pentium */
913 #define PROC_SEXIUM 0x20 /* Intel 686 aka P6 aka Pentium Pro or MMX */
914 +#define PROC_ITANIUM 0x40 /* Intel Itanium 64 bit */
915
916 /* PROC_i960: */
917 -#define PROC_960RX 0x01 /* Intel 80960RC/RD */
918 +#define PROC_960RX 0x01 /* Intel 80960RP/RD */
919 #define PROC_960HX 0x02 /* Intel 80960HA/HD/HT */
920 +#define PROC_960RN 0x03 /* Intel 80960RN/RM */
921 +#define PROC_960RS 0x04 /* Intel 80960RS */
922 +#define PROC_80303 0x05 /* Intel 80303 (ZION) */
923
924 /* PROC_MOTOROLA: */
925 #define PROC_68000 0x01 /* Motorola 68000 */
926 @@ -125,8 +131,9 @@
927 #define PROC_PPC603 0x02 /* PowerPC 603 */
928 #define PROC_PPC604 0x04 /* PowerPC 604 */
929
930 -/* PROC_MIPS4000: */
931 +/* PROC_MIPS */
932 #define PROC_R4000 0x01 /* MIPS R4000 */
933 +#define PROC_RM7000 0x02 /* MIPS RM7000 */
934
935 /* Filetype - sigBYTE dsFiletype; DISTINCT VALUES */
936 /* ------------------------------------------------------------------ */
937 @@ -147,6 +154,7 @@
938 #define FT_LIBRARY 14 /* Storage Manager Real-Mode Calls */
939 #define FT_RESOURCE 15 /* Storage Manager Resource File */
940 #define FT_MODEM_DB 16 /* Storage Manager Modem Database */
941 +#define FT_DMI 17 /* DMI component interface */
942
943 /* Filetype flags - sigBYTE dsFiletypeFlags; FLAG BITS */
944 /* ------------------------------------------------------------------ */
945 @@ -170,6 +178,7 @@
946 #define OEM_OLIVETTI 5 /* Olivetti */
947 #define OEM_SNI 6 /* Siemens/Nixdorf */
948 #define OEM_SUN 7 /* SUN Microsystems */
949 +#define OEM_ADAPTEC 8 /* Adaptec */
950
951 /* Operating System - sigLONG dsOS; FLAG BITS */
952 /* ------------------------------------------------------------------ */
953 @@ -202,6 +211,8 @@
954 #define OS_SINIX_N 0x04000000 /* SNI SINIX-N */
955 #define OS_PLAN9 0x08000000 /* ATT Plan 9 */
956 #define OS_TSX 0x10000000 /* SNH TSX-32 */
957 +#define OS_WINDOWS_98 0x20000000 /* Microsoft Windows '98 */
958 +#define OS_NW5x 0x40000000 /* Novell Netware 5x */
959
960 #define OS_OTHER 0x80000000 /* Other */
961
962 @@ -284,6 +295,93 @@
963 #define REQ_ENGINE 0x10 /* Requires a DPT Engine to be loaded */
964 #define REQ_COMM_ENG 0x20 /* Requires a DPT Communications Engine */
965
966 +/* ------------------------------------------------------------------ */
967 +/* Requirements - sigWORD dsFirmware; FLAG BITS */
968 +/* ------------------------------------------------------------------ */
969 +#define dsFirmware dsApplication
970 +#define FW_DNLDSIZE16_OLD 0x0000 /* 0..3 DownLoader Size 16K - TO SUPPORT OLD IMAGES */
971 +#define FW_DNLDSIZE16k 0x0000 /* 0..3 DownLoader Size 16k */
972 +#define FW_DNLDSIZE16 0x0001 /* 0..3 DownLoader Size 16K */
973 +#define FW_DNLDSIZE32 0x0002 /* 0..3 DownLoader Size 32K */
974 +#define FW_DNLDSIZE64 0x0004 /* 0..3 DownLoader Size 64K */
975 +#define FW_DNLDSIZE0 0x000f /* 0..3 DownLoader Size 0K - NONE */
976 +#define FW_DNLDSIZE_NONE 0x000F /* 0..3 DownLoader Size - NONE */
977 +
978 + /* Code Offset is position of the code within the ROM CODE Segment */
979 +#define FW_DNLDR_TOP 0x0000 /* 12 DownLoader Position (0=Top, 1=Bottom) */
980 +#define FW_DNLDR_BTM 0x1000 /* 12 DownLoader Position (0=Top, 1=Bottom) Dominator */
981 +
982 +#define FW_LOAD_BTM 0x0000 /* 13 Code Offset (0=Btm, 1=Top) MIPS */
983 +#define FW_LOAD_TOP 0x2000 /* 13 Code Offset (0=Btm, 1=Top) i960 */
984 +
985 +#define FW_SIG_VERSION1 0x0000 /* 15..14 Version Bits 0=Ver1 */
986 +#define FW_SIG_VERSION2 0x4000 /* 15..14 Version Bits 1=Ver2 */
987 +
988 +/*
989 + 0..3 Downloader Size (Value * 16K)
990 +
991 + 4
992 + 5
993 + 6
994 + 7
995 +
996 + 8
997 + 9
998 + 10
999 + 11
1000 +
1001 + 12 Downloader Position (0=Top of Image 1= Bottom of Image (Dominator) )
1002 + 13 Load Offset (0=BTM (MIPS) -- 1=TOP (960) )
1003 + 14..15 F/W Sig Version (0=Ver1)
1004 +*/
1005 +
1006 +/* ------------------------------------------------------------------ */
1007 +/* Sub System Vendor IDs - The PCI Sub system and vendor IDs for each */
1008 +/* Adaptec Raid controller */
1009 +/* ------------------------------------------------------------------ */
1010 +#define PM1554U2_SUB_ID 0xC0011044
1011 +#define PM1654U2_SUB_ID 0xC0021044
1012 +#define PM1564U3_1_SUB_ID 0xC0031044
1013 +#define PM1564U3_2_SUB_ID 0xC0041044
1014 +#define PM1554U2_NOACPI_SUB_ID 0xC0051044
1015 +#define PM2554U2_SUB_ID 0xC00A1044
1016 +#define PM2654U2_SUB_ID 0xC00B1044
1017 +#define PM2664U3_1_SUB_ID 0xC00C1044
1018 +#define PM2664U3_2_SUB_ID 0xC00D1044
1019 +#define PM2554U2_NOACPI_SUB_ID 0xC00E1044
1020 +#define PM2654U2_NOACPI_SUB_ID 0xC00F1044
1021 +#define PM3754U2_SUB_ID 0xC0141044
1022 +#define PM3755U2B_SUB_ID 0xC0151044
1023 +#define PM3755F_SUB_ID 0xC0161044
1024 +#define PM3757U2_1_SUB_ID 0xC01E1044
1025 +#define PM3757U2_2_SUB_ID 0xC01F1044
1026 +#define PM3767U3_2_SUB_ID 0xC0201044
1027 +#define PM3767U3_4_SUB_ID 0xC0211044
1028 +#define PM2865U3_1_SUB_ID 0xC0281044
1029 +#define PM2865U3_2_SUB_ID 0xC0291044
1030 +#define PM2865F_SUB_ID 0xC02A1044
1031 +#define ADPT2000S_1_SUB_ID 0xC03C1044
1032 +#define ADPT2000S_2_SUB_ID 0xC03D1044
1033 +#define ADPT2000F_SUB_ID 0xC03E1044
1034 +#define ADPT3000S_1_SUB_ID 0xC0461044
1035 +#define ADPT3000S_2_SUB_ID 0xC0471044
1036 +#define ADPT3000F_SUB_ID 0xC0481044
1037 +#define ADPT5000S_1_SUB_ID 0xC0501044
1038 +#define ADPT5000S_2_SUB_ID 0xC0511044
1039 +#define ADPT5000F_SUB_ID 0xC0521044
1040 +#define ADPT1000UDMA_SUB_ID 0xC05A1044
1041 +#define ADPT1000UDMA_DAC_SUB_ID 0xC05B1044
1042 +#define ADPTI2O_DEVICE_ID 0xa501
1043 +#define ADPTDOMINATOR_DEVICE_ID 0xa511
1044 +#define ADPTDOMINATOR_SUB_ID_START 0xC0321044
1045 +#define ADPTDOMINATOR_SUB_ID_END 0xC03b1044
1046 +
1047 +
1048 +
1049 +/* ------------------------------------------------------------------ */
1050 +/* ------------------------------------------------------------------ */
1051 +/* ------------------------------------------------------------------ */
1052 +
1053 /*
1054 * You may adjust dsDescription_size with an override to a value less than
1055 * 50 so that the structure allocates less real space.
1056 @@ -318,6 +416,35 @@
1057 /* 32 bytes minimum - with no description. Put NULL at description[0] */
1058 /* 81 bytes maximum - with 49 character description plus NULL. */
1059
1060 +#if defined __bsdi__
1061 +#ifndef PACK
1062 +#define PACK __attribute__ ((packed))
1063 +#endif
1064 +typedef struct dpt_sig_Packed {
1065 + char dsSignature[6] PACK; /* ALWAYS "dPtSiG" */
1066 + sigBYTE dsSigVersion PACK; /* signature version (currently 1) */
1067 + sigBYTE dsProcessorFamily PACK; /* what type of processor */
1068 + sigBYTE dsProcessor PACK; /* precise processor */
1069 + sigBYTE dsFiletype PACK; /* type of file */
1070 + sigBYTE dsFiletypeFlags PACK; /* flags to specify load type, etc. */
1071 + sigBYTE dsOEM PACK; /* OEM file was created for */
1072 + sigLONG dsOS PACK; /* which Operating systems */
1073 + sigWORD dsCapabilities PACK; /* RAID levels, etc. */
1074 + sigWORD dsDeviceSupp PACK; /* Types of SCSI devices supported */
1075 + sigWORD dsAdapterSupp PACK; /* DPT adapter families supported */
1076 + sigWORD dsApplication PACK; /* applications file is for */
1077 + sigBYTE dsRequirements PACK; /* Other driver dependencies */
1078 + sigBYTE dsVersion PACK; /* 1 */
1079 + sigBYTE dsRevision PACK; /* 'J' */
1080 + sigBYTE dsSubRevision PACK; /* '9' ' ' if N/A */
1081 + sigBYTE dsMonth PACK; /* creation month */
1082 + sigBYTE dsDay PACK; /* creation day */
1083 + sigBYTE dsYear PACK; /* creation year since 1980 (1993=13) */
1084 + /* description (NULL terminated) */
1085 + char dsDescription[dsDescription_size] PACK;
1086 +} dpt_sig_S_Packed;
1087 +#define PACKED_SIG_SIZE sizeof(dpt_sig_S_Packed)
1088 +#endif
1089 /* This line added at Roycroft's request */
1090 /* Microsoft's NT compiler gets confused if you do a pack and don't */
1091 /* restore it. */
1092 diff -ruN linux-2.6.1/drivers/scsi/dpt_i2o.c linux-2.6.1-adaptec/drivers/scsi/dpt_i2o.c
1093 --- linux-2.6.1/drivers/scsi/dpt_i2o.c 2004-01-09 01:59:56.000000000 -0500
1094 +++ linux-2.6.1-adaptec/drivers/scsi/dpt_i2o.c 2004-01-27 14:08:58.472068936 -0500
1095 @@ -1,14 +1,15 @@
1096 /***************************************************************************
1097 - dpti.c - description
1098 + dpt_i2o.c - description
1099 -------------------
1100 begin : Thu Sep 7 2000
1101 - copyright : (C) 2000 by Adaptec
1102 - email : deanna_bonds@adaptec.com
1103 + copyright : (C) 2000-2003 by Adaptec
1104 + email : Mark_Salyzyn@adaptec.com
1105 + original author : deanna_bonds@adaptec.com
1106
1107 - July 30, 2001 First version being submitted
1108 + July 30, 2001 First version being submitted
1109 for inclusion in the kernel. V2.4
1110
1111 - See Documentation/scsi/dpti.txt for history, notes, license info
1112 + See Documentation/scsi/dpti.txt for history, notes, license info,
1113 and credits
1114 ***************************************************************************/
1115
1116 @@ -24,19 +25,21 @@
1117 //#define DEBUG 1
1118 //#define UARTDELAY 1
1119
1120 -// On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
1121 -// high pages. Keep the macro around because of the broken unmerged ia64 tree
1122 -
1123 -#define ADDR32 (0)
1124 -
1125 -#error Please convert me to Documentation/DMA-mapping.txt
1126 -
1127 #include <linux/version.h>
1128 +
1129 #include <linux/module.h>
1130
1131 -MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
1132 +MODULE_AUTHOR("Deanna Bonds & Mark Salyzyn");
1133 MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
1134
1135 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0))
1136 +static char kernel_version[] = UTS_RELEASE;
1137 +#endif
1138 +
1139 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,13)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,9))
1140 +# define dma_handle ptr
1141 +#endif
1142 +
1143 ////////////////////////////////////////////////////////////////
1144
1145 #include <linux/ioctl.h> /* For SCSI-Passthrough */
1146 @@ -47,7 +50,12 @@
1147 #include <linux/config.h> /* for CONFIG_PCI */
1148 #include <linux/pci.h> /* for PCI support */
1149 #include <linux/proc_fs.h>
1150 -#include <linux/blkdev.h>
1151 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
1152 +# include <linux/blkdev.h>
1153 +#else
1154 +# include <linux/blk.h>
1155 +# include <linux/tqueue.h>
1156 +#endif
1157 #include <linux/delay.h> /* for udelay */
1158 #include <linux/interrupt.h>
1159 #include <linux/kernel.h> /* for printk */
1160 @@ -62,14 +70,30 @@
1161
1162 #include <asm/processor.h> /* for boot_cpu_data */
1163 #include <asm/pgtable.h>
1164 -#include <asm/io.h> /* for virt_to_bus, etc. */
1165 +#include <asm/io.h>
1166
1167 #include "scsi.h"
1168 #include "hosts.h"
1169 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,65))
1170 +# include "sd.h"
1171 +#endif
1172
1173 #include "dpt/dptsig.h"
1174 #include "dpti.h"
1175
1176 +#if (defined(__x86_64__))
1177 +# include <asm-x86_64/ioctl32.h>
1178 +#endif
1179 +
1180 +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
1181 +static inline int pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
1182 +{
1183 + dev->dma_mask = mask;
1184 +
1185 + return 0;
1186 +}
1187 +#endif
1188 +
1189 /*============================================================================
1190 * Create a binary signature - this is read by dptsig
1191 * Needed for our management apps
1192 @@ -79,14 +103,16 @@
1193 {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
1194 #ifdef __i386__
1195 PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
1196 -#elif defined(__ia64__)
1197 - PROC_INTEL, PROC_IA64,
1198 -#elif defined(__sparc__)
1199 - PROC_ULTRASPARC,
1200 -#elif defined(__alpha__)
1201 - PROC_ALPHA ,
1202 +#elif defined __ia64__
1203 + PROC_INTEL, PROC_ITANIUM,
1204 +#elif defined __x86_64__
1205 + PROC_INTEL, PROC_SEXIUM,
1206 +#elif defined __sparc__
1207 + PROC_ULTRASPARC, ~(sigBYTE)0U,
1208 +#elif defined __alpha__
1209 + PROC_ALPHA, ~(sigBYTE)0U,
1210 #else
1211 - (-1),(-1)
1212 + ~(sigBYTE)0U, ~(sigBYTE)0U,
1213 #endif
1214 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
1215 ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
1216 @@ -101,9 +127,13 @@
1217 *============================================================================
1218 */
1219
1220 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0))
1221 +static struct semaphore adpt_configuration_lock = MUTEX;
1222 +#else
1223 DECLARE_MUTEX(adpt_configuration_lock);
1224 -
1225 -static struct i2o_sys_tbl *sys_tbl = NULL;
1226 +#endif
1227 +static struct i2o_sys_tbl *sys_tbl_va = NULL;
1228 +static dma_addr_t sys_tbl_pa;
1229 static int sys_tbl_ind = 0;
1230 static int sys_tbl_len = 0;
1231
1232 @@ -111,6 +141,15 @@
1233 static adpt_hba* hba_chain = NULL;
1234 static int hba_count = 0;
1235
1236 +// If this is driver is embedded in the kernel this define
1237 +// should be moved to include/linux/proc_fs.h as an emumerated type
1238 +#define PROC_SCSI_DPT_I2O 0
1239 +struct proc_dir_entry proc_scsi_dptI2O = {
1240 + PROC_SCSI_DPT_I2O, 7, DPT_DRIVER,
1241 + S_IFDIR | S_IRUGO | S_IXUGO, 2,
1242 + 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL
1243 +};
1244 +
1245 static struct file_operations adpt_fops = {
1246 .ioctl = adpt_ioctl,
1247 .open = adpt_open,
1248 @@ -149,9 +188,9 @@
1249
1250 static u8 adpt_read_blink_led(adpt_hba* host)
1251 {
1252 - if(host->FwDebugBLEDflag_P != 0) {
1253 - if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
1254 - return readb(host->FwDebugBLEDvalue_P);
1255 + if(host->bled_flag_addr_virt != 0) {
1256 + if( readb(host->bled_flag_addr_virt) == 0xbc ){
1257 + return readb(host->bled_value_addr_virt);
1258 }
1259 }
1260 return 0;
1261 @@ -178,10 +217,11 @@
1262
1263 PINFO("Detecting Adaptec I2O RAID controllers...\n");
1264
1265 - /* search for all Adatpec I2O RAID cards */
1266 - while ((pDev = pci_find_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
1267 - if(pDev->device == PCI_DPT_DEVICE_ID ||
1268 - pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
1269 + /* search for all Adaptec I2O RAID cards */
1270 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0))
1271 + while ((pDev = pci_find_device( dptids[0].vendor, PCI_ANY_ID, pDev))) {
1272 + if(pDev->device == dptids[0].device ||
1273 + pDev->device == dptids[1].device){
1274 if(adpt_install_hba(sht, pDev) ){
1275 PERROR("Could not Init an I2O RAID device\n");
1276 PERROR("Will not try to detect others.\n");
1277 @@ -189,6 +229,18 @@
1278 }
1279 }
1280 }
1281 +#else
1282 + while ((pDev = adpt_pci_find_device( dptids[0].vendor, pDev))) {
1283 + if(pDev->device == dptids[0].device ||
1284 + pDev->device == dptids[1].device){
1285 + if(adpt_install_hba(sht, pDev) ){
1286 + PERROR("Could not Init an I2O RAID device\n");
1287 + PERROR("Will not try to detect others.\n");
1288 + return hba_count-1;
1289 + }
1290 + }
1291 + }
1292 +#endif
1293
1294 /* In INIT state, Activate IOPs */
1295 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1296 @@ -242,11 +294,23 @@
1297 adpt_inquiry(pHba);
1298 }
1299
1300 +#if 0
1301 +printk (KERN_INFO"dpti: Register us with the SCSI system\n");
1302 +#endif
1303 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1304 +#if 0
1305 +printk (KERN_INFO"adpt_scsi_register(%p,%p)\n", pHba, sht);
1306 +#endif
1307 if( adpt_scsi_register(pHba,sht) < 0){
1308 +#if 0
1309 +printk (KERN_INFO"adpt_i2o_delete_hba(%p)\n", pHba);
1310 +#endif
1311 adpt_i2o_delete_hba(pHba);
1312 continue;
1313 }
1314 +#if 0
1315 +printk (KERN_INFO"registered\n");
1316 +#endif
1317 pHba->initialized = TRUE;
1318 pHba->state &= ~DPTI_STATE_RESET;
1319 }
1320 @@ -254,10 +318,27 @@
1321 // Register our control device node
1322 // nodes will need to be created in /dev to access this
1323 // the nodes can not be created from within the driver
1324 +#if 0
1325 +printk (KERN_INFO"dpti: Register us with the char device system\n");
1326 +#endif
1327 if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
1328 adpt_i2o_sys_shutdown();
1329 return 0;
1330 }
1331 +# if (defined(__x86_64__))
1332 + register_ioctl32_conversion(DPT_SIGNATURE, sys_ioctl);
1333 + register_ioctl32_conversion(I2OUSRCMD, sys_ioctl);
1334 + register_ioctl32_conversion(DPT_CTRLINFO, sys_ioctl);
1335 + register_ioctl32_conversion(DPT_SYSINFO, sys_ioctl);
1336 + register_ioctl32_conversion(DPT_BLINKLED, sys_ioctl);
1337 + register_ioctl32_conversion(I2ORESETCMD, sys_ioctl);
1338 + register_ioctl32_conversion(I2ORESCANCMD, sys_ioctl);
1339 + register_ioctl32_conversion(DPT_TARGET_BUSY & 0xFFFF, sys_ioctl);
1340 + register_ioctl32_conversion(DPT_TARGET_BUSY, sys_ioctl);
1341 +# endif
1342 +#if 0
1343 +printk (KERN_INFO"dpti: %d adapters\n", hba_count);
1344 +#endif
1345 return hba_count;
1346 }
1347
1348 @@ -284,11 +365,12 @@
1349 u32 len;
1350 u32 reqlen;
1351 u8* buf;
1352 + dma_addr_t addr;
1353 u8 scb[16];
1354 s32 rcode;
1355
1356 memset(msg, 0, sizeof(msg));
1357 - buf = (u8*)kmalloc(80,GFP_KERNEL|ADDR32);
1358 + buf = (u8*)pci_alloc_consistent(pHba->pDev, 80, &addr);
1359 if(!buf){
1360 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
1361 return;
1362 @@ -301,18 +383,18 @@
1363
1364 reqlen = 14; // SINGLE SGE
1365 /* Stick the headers on */
1366 - msg[0] = reqlen<<16 | SGL_OFFSET_12;
1367 - msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
1368 + msg[0] = cpu_to_le32(reqlen<<16 | SGL_OFFSET_12);
1369 + msg[1] = cpu_to_le32(0xff<<24|HOST_TID<<12|ADAPTER_TID);
1370 msg[2] = 0;
1371 - msg[3] = 0;
1372 + msg[3] = 0;
1373 // Adaptec/DPT Private stuff
1374 - msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
1375 - msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
1376 + msg[4] = cpu_to_le32(I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16);
1377 + msg[5] = cpu_to_le32(ADAPTER_TID | 1<<16) /* Interpret*/;
1378 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
1379 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
1380 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
1381 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
1382 - msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
1383 + msg[6] = cpu_to_le32(scsidir|0x20a00000| 6) /* cmd len*/;
1384
1385 mptr=msg+7;
1386
1387 @@ -331,15 +413,28 @@
1388 lenptr=mptr++; /* Remember me - fill in when we know */
1389
1390 /* Now fill in the SGList and command */
1391 - *lenptr = len;
1392 - *mptr++ = 0xD0000000|direction|len;
1393 - *mptr++ = virt_to_bus(buf);
1394 + *lenptr = cpu_to_le32(len);
1395 + /* The following test gets optimized out if dma_addr_t is <= 32 bits */
1396 + if( (sizeof(dma_addr_t) > 4) && (pHba->pae_support) && (((u64)addr >> 32) != 0) ) {
1397 + *mptr++ = cpu_to_le32((0x7C<<24)+(2<<16)+0x02); /* Enable 64 bit */
1398 + *mptr++ = cpu_to_le32(1 << PAGE_SHIFT);
1399 + *mptr++ = cpu_to_le32(0xD0000000|direction|len);
1400 + *mptr++ = cpu_to_le32(addr);
1401 + *mptr++ = cpu_to_le32((u64)addr >> 32);
1402 + reqlen += 3;
1403 + msg[0] = cpu_to_le32(reqlen<<16 | SGL_OFFSET_12);
1404 + } else {
1405 + *mptr++ = cpu_to_le32(0xD0000000|direction|len);
1406 + *mptr++ = cpu_to_le32(addr);
1407 + }
1408
1409 // Send it on it's way
1410 rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
1411 if (rcode != 0) {
1412 sprintf(pHba->detail, "Adaptec I2O RAID");
1413 printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
1414 + if (rcode != -ETIME && rcode != -EINTR)
1415 + pci_free_consistent(pHba->pDev, 80, buf, addr);
1416 } else {
1417 memset(pHba->detail, 0, sizeof(pHba->detail));
1418 memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
1419 @@ -348,28 +443,62 @@
1420 memcpy(&(pHba->detail[40]), " FW: ", 4);
1421 memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
1422 pHba->detail[48] = '\0'; /* precautionary */
1423 + pci_free_consistent(pHba->pDev, 80, buf, addr);
1424 }
1425 - kfree(buf);
1426 adpt_i2o_status_get(pHba);
1427 return ;
1428 }
1429
1430
1431 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
1432 static int adpt_slave_configure(Scsi_Device * device)
1433 {
1434 - struct Scsi_Host *host = device->host;
1435 - adpt_hba* pHba;
1436 + struct Scsi_Host * host = device->host;
1437 + adpt_hba * pHba;
1438
1439 pHba = (adpt_hba *) host->hostdata[0];
1440
1441 if (host->can_queue && device->tagged_supported) {
1442 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
1443 - host->can_queue - 1);
1444 + host->can_queue - 1);
1445 } else {
1446 scsi_adjust_queue_depth(device, 0, 1);
1447 - }
1448 + }
1449 return 0;
1450 }
1451 +#else
1452 +static void adpt_select_queue_depths(struct Scsi_Host *host, Scsi_Device * devicelist)
1453 +{
1454 + Scsi_Device *device; /* scsi layer per device information */
1455 + adpt_hba* pHba;
1456 +
1457 + pHba = (adpt_hba *) host->hostdata[0];
1458 +
1459 + for (device = devicelist; device != NULL; device = device->next) {
1460 + if (device->host != host) {
1461 + continue;
1462 + }
1463 + if (host->can_queue) {
1464 + device->queue_depth = host->can_queue - 1;
1465 + } else {
1466 + device->queue_depth = 1;
1467 + }
1468 + }
1469 +}
1470 +#endif
1471 +#if 0
1472 +void adpt_sleep(void)
1473 +{
1474 + spinlock_t * was_locked = (spinlock_t *)NULL;
1475 + if (spin_is_locked(&io_request_lock)) {
1476 + was_locked = &io_request_lock;
1477 + spin_unlock_irq(was_locked);
1478 + }
1479 + scsi_sleep(1);
1480 + if (was_locked)
1481 + spin_lock_irq(was_locked);
1482 +}
1483 +#endif
1484
1485 static int adpt_queue(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1486 {
1487 @@ -377,6 +506,10 @@
1488 struct adpt_device* pDev = NULL; /* dpt per device information */
1489 ulong timeout = jiffies + (TMOUT_SCSI*HZ);
1490
1491 +#if 0
1492 +printk (KERN_INFO"adpt_queue(%p,%p)\n", cmd, done);
1493 +adpt_sleep();
1494 +#endif
1495 cmd->scsi_done = done;
1496 /*
1497 * SCSI REQUEST_SENSE commands will be executed automatically by the
1498 @@ -411,8 +544,9 @@
1499 return 1;
1500 }
1501
1502 - if(cmd->eh_state != SCSI_STATE_QUEUED){
1503 - // If we are not doing error recovery
1504 + if ((cmd->eh_state != SCSI_STATE_QUEUED)
1505 + && (cmd->device->type == TYPE_DISK)) {
1506 + // If the controller is doing error recovery
1507 mod_timer(&cmd->eh_timeout, timeout);
1508 }
1509
1510 @@ -424,6 +558,10 @@
1511 * to the device structure. This should be a TEST_UNIT_READY
1512 * command from scan_scsis_single.
1513 */
1514 +#if 0
1515 +printk (KERN_INFO"adpt_find_device(%p,%d,%d,%d)\n", pHba, cmd->device->channel, cmd->device->id, cmd->device->lun);
1516 +adpt_sleep();
1517 +#endif
1518 if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
1519 // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
1520 // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
1521 @@ -431,8 +569,16 @@
1522 cmd->scsi_done(cmd);
1523 return 0;
1524 }
1525 +#if 0
1526 +printk (KERN_INFO"pDev=%p\n", pDev);
1527 +adpt_sleep();
1528 +#endif
1529 (struct adpt_device*)(cmd->device->hostdata) = pDev;
1530 }
1531 +#if 0
1532 +printk (KERN_INFO"pDev->pScsi_dev=%p\n", cmd->device);
1533 +adpt_sleep();
1534 +#endif
1535 pDev->pScsi_dev = cmd->device;
1536
1537 /*
1538 @@ -445,12 +591,21 @@
1539 return adpt_scsi_to_i2o(pHba, cmd, pDev);
1540 }
1541
1542 -static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
1543 - sector_t capacity, int geom[])
1544 +static int adpt_bios_param(
1545 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
1546 + struct scsi_device *sdev, struct block_device *dev, sector_t capacity,
1547 +#else
1548 + Disk* disk, kdev_t dev,
1549 +#endif
1550 + int geom[])
1551 {
1552 int heads=-1;
1553 int sectors=-1;
1554 int cylinders=-1;
1555 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,65))
1556 + unsigned long capacity = disk->capacity;
1557 + Scsi_Device * sdev = disk->device;
1558 +#endif
1559
1560 // *** First lets set the default geometry ****
1561
1562 @@ -479,7 +634,12 @@
1563 heads = 255;
1564 sectors = 63;
1565 }
1566 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
1567 + sector_div(capacity, heads * sectors);
1568 + cylinders = (unsigned)capacity;
1569 +#else
1570 cylinders = capacity / (heads * sectors);
1571 +#endif
1572
1573 // Special case if CDROM
1574 if(sdev->type == 5) { // CDROM
1575 @@ -505,8 +665,15 @@
1576 return (char *) (pHba->detail);
1577 }
1578
1579 -static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
1580 - int length, int inout)
1581 +static int adpt_proc_info(
1582 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
1583 + struct Scsi_Host *host,
1584 +#endif
1585 + char *buffer, char **start, off_t offset, int length,
1586 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,65))
1587 + int hostno,
1588 +#endif
1589 + int inout)
1590 {
1591 struct adpt_device* d;
1592 int id;
1593 @@ -515,6 +682,9 @@
1594 int begin = 0;
1595 int pos = 0;
1596 adpt_hba* pHba;
1597 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,65))
1598 + struct Scsi_Host *host;
1599 +#endif
1600 int unit;
1601
1602 *start = buffer;
1603 @@ -538,7 +708,12 @@
1604 // Find HBA (host bus adapter) we are looking for
1605 down(&adpt_configuration_lock);
1606 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1607 - if (pHba->host == host) {
1608 +# if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
1609 + if (pHba->host == host)
1610 +# else
1611 + if (pHba->host->host_no == hostno)
1612 +# endif
1613 + {
1614 break; /* found adapter */
1615 }
1616 }
1617 @@ -548,7 +723,11 @@
1618 }
1619 host = pHba->host;
1620
1621 - len = sprintf(buffer , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
1622 +# if (defined(DPT_I2O_DRIVER_BUILD))
1623 + len = sprintf(buffer , "Adaptec I2O RAID Driver Version: %s[%d]\n\n", DPT_I2O_VERSION, DPT_I2O_DRIVER_BUILD);
1624 +# else
1625 + len = sprintf(buffer , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
1626 +# endif
1627 len += sprintf(buffer+len, "%s\n", pHba->detail);
1628 len += sprintf(buffer+len, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
1629 pHba->host->host_no, pHba->name, host->irq);
1630 @@ -575,8 +754,10 @@
1631 for(id = 0; id < MAX_ID; id++) {
1632 d = pHba->channel[chan].device[id];
1633 while(d){
1634 - len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
1635 - len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
1636 + if (d->pScsi_dev) {
1637 + len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
1638 + len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
1639 + }
1640 pos = begin + len;
1641
1642
1643 @@ -589,10 +770,10 @@
1644 begin = pos;
1645 }
1646
1647 - unit = d->pI2o_dev->lct_data.tid;
1648 + unit = le32_to_cpu(d->pI2o_dev->lct_data.tid);
1649 len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n",
1650 unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
1651 - d->pScsi_dev->online? "online":"offline");
1652 + (d->pScsi_dev && d->pScsi_dev->online)? "online":"offline");
1653 pos = begin + len;
1654
1655 /* CHECKPOINT */
1656 @@ -654,11 +835,14 @@
1657 }
1658
1659 memset(msg, 0, sizeof(msg));
1660 - msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
1661 - msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
1662 + msg[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE|SGL_OFFSET_0);
1663 + msg[1] = cpu_to_le32(I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid);
1664 msg[2] = 0;
1665 - msg[3]= 0;
1666 - msg[4] = (u32)cmd;
1667 + msg[3] = 0;
1668 + if (sizeof(cmd) > sizeof(u32))
1669 + msg[4] = (u32)cmd->serial_number;
1670 + else
1671 + msg[4] = (u32)cmd; /* EVIL, not 64 bit safe, but faster */
1672 if( (rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER)) != 0){
1673 if(rcode == -EOPNOTSUPP ){
1674 printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
1675 @@ -691,8 +875,8 @@
1676 return FAILED;
1677 }
1678 memset(msg, 0, sizeof(msg));
1679 - msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
1680 - msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
1681 + msg[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0);
1682 + msg[1] = cpu_to_le32(I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
1683 msg[2] = 0;
1684 msg[3] = 0;
1685
1686 @@ -720,12 +904,14 @@
1687 {
1688 adpt_hba* pHba;
1689 u32 msg[4];
1690 + int channel;
1691
1692 + channel = cmd->device->channel;
1693 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
1694 memset(msg, 0, sizeof(msg));
1695 - printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
1696 - msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
1697 - msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
1698 + printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, channel,pHba->channel[channel].tid );
1699 + msg[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0);
1700 + msg[1] = cpu_to_le32(I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[channel].tid);
1701 msg[2] = 0;
1702 msg[3] = 0;
1703 if(adpt_i2o_post_wait(pHba, (void*)msg,sizeof(msg), FOREVER) ){
1704 @@ -742,8 +928,11 @@
1705 {
1706 adpt_hba* pHba;
1707 int rcode;
1708 + int channel;
1709 +
1710 + channel = cmd->device->channel;
1711 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
1712 - printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
1713 + printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,channel,pHba->channel[channel].tid );
1714 rcode = adpt_hba_reset(pHba);
1715 if(rcode == 0){
1716 printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
1717 @@ -789,6 +978,8 @@
1718 adpt_i2o_delete_hba(pHba);
1719 return rcode;
1720 }
1721 + adpt_inquiry(pHba);
1722 +
1723 pHba->state &= ~DPTI_STATE_RESET;
1724
1725 adpt_fail_posted_scbs(pHba);
1726 @@ -862,12 +1053,13 @@
1727 ulong base_addr1_phys = 0;
1728 u32 hba_map0_area_size = 0;
1729 u32 hba_map1_area_size = 0;
1730 - ulong base_addr_virt = 0;
1731 - ulong msg_addr_virt = 0;
1732 + char * base_addr_virt = 0;
1733 + char * msg_addr_virt = 0;
1734
1735 int raptorFlag = FALSE;
1736 int i;
1737
1738 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0))
1739 if(pci_enable_device(pDev)) {
1740 return -EINVAL;
1741 }
1742 @@ -894,14 +1086,75 @@
1743 }
1744
1745
1746 - base_addr_virt = (ulong)ioremap(base_addr0_phys,hba_map0_area_size);
1747 +#else /* 2.2.* kernel method */
1748 + u16 command = 0;
1749 + u16 subdevice = 0;
1750 +
1751 + // Read in the command register and make sure that the device is
1752 + // enabled and set up for bus master
1753 + pci_read_config_word(pDev, PCI_COMMAND, &command);
1754 + if(((command & PCI_COMMAND_MEMORY) && !(command & PCI_COMMAND_MASTER))){
1755 + command |= PCI_COMMAND_MASTER;
1756 + pci_write_config_word(pDev, PCI_COMMAND, command);
1757 + }
1758 +
1759 + pci_read_config_dword(pDev, PCI_BASE_ADDRESS_0,(u32*)&base_addr0_phys);
1760 + // To get the size of the memory space taken we have to write out
1761 + // 0xffffffff (32 bit) to the base register (PCI_BASE_ADDRESS_0)
1762 	+ // and then read it back. The lower 4 bits are ignored, since they
1763 	+ // only contain the address space flag (I/O or memory)
1764 	+ // and the memory type (32 bit, below 1M, or 64 bit).
1765 	+ // The rest is used to determine the size of the memory space
1766 	+ // used; all other upper bits will be set to ones.
1767 	+ // Taking the negative of this number and adding one
1768 	+ // gives us the memory size. We must also write the original
1769 	+ // base address back out to restore it.
1770 + pci_write_config_dword(pDev, PCI_BASE_ADDRESS_0 , 0xffffffff);
1771 + pci_read_config_dword(pDev, PCI_BASE_ADDRESS_0 , &hba_map0_area_size);
1772 +
1773 + // Restore the base address
1774 + pci_write_config_dword(pDev, PCI_BASE_ADDRESS_0 , (u32)base_addr0_phys);
1775 + (u32)base_addr0_phys &= PCI_BASE_ADDRESS_MEM_MASK;
1776 +
1777 	+ // Take the negative, disregard the bottom four bits and add 1
1778 + hba_map0_area_size &= PCI_BASE_ADDRESS_MEM_MASK; // And out the lower 4 bits
1779 + hba_map0_area_size = ~hba_map0_area_size + 1; // Take the negative and add 1
1780 +
1781 + pci_read_config_word (pDev, PCI_SUBSYSTEM_ID, &subdevice);
1782 +
1783 + if(pDev->device == PCI_DPT_DEVICE_ID){
1784 + // Raptor card with this device id needs 4M
1785 + if(subdevice >= 0xc032 && subdevice <= 0xc03b){
1786 + hba_map0_area_size = 0x400000;
1787 + } else {
1788 + if(hba_map0_area_size > 0x100000) { // Only give 'em 1M
1789 + hba_map0_area_size = 0x100000;
1790 + }
1791 + }
1792 + } else {
1793 + //Use BAR1 in this config
1794 + pci_read_config_dword(pDev,PCI_BASE_ADDRESS_1, (u32*)&base_addr1_phys);
1795 + pci_write_config_dword(pDev,PCI_BASE_ADDRESS_1, 0xffffffff);
1796 + pci_read_config_dword(pDev,PCI_BASE_ADDRESS_1, &hba_map1_area_size);
1797 +
1798 + //Restore the base address
1799 + pci_write_config_dword(pDev,PCI_BASE_ADDRESS_1, (u32)base_addr1_phys);
1800 + (u32)base_addr1_phys &= PCI_BASE_ADDRESS_MEM_MASK;
1801 + hba_map1_area_size &= PCI_BASE_ADDRESS_MEM_MASK;
1802 + hba_map1_area_size = ~hba_map1_area_size + 1;
1803 +
1804 + raptorFlag=TRUE;
1805 + }
1806 +#endif
1807 +
1808 + base_addr_virt = (char *)ioremap(base_addr0_phys,hba_map0_area_size);
1809 if(base_addr_virt == 0) {
1810 PERROR("dpti: adpt_config_hba: io remap failed\n");
1811 return -EINVAL;
1812 }
1813
1814 if(raptorFlag == TRUE) {
1815 - msg_addr_virt = (ulong)ioremap(base_addr1_phys, hba_map1_area_size );
1816 + msg_addr_virt = (char *)ioremap(base_addr1_phys, hba_map1_area_size );
1817 if(msg_addr_virt == 0) {
1818 PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
1819 iounmap((void*)base_addr_virt);
1820 @@ -949,32 +1202,31 @@
1821 // Set up the Virtual Base Address of the I2O Device
1822 pHba->base_addr_virt = base_addr_virt;
1823 pHba->msg_addr_virt = msg_addr_virt;
1824 - pHba->irq_mask = (ulong)(base_addr_virt+0x30);
1825 - pHba->post_port = (ulong)(base_addr_virt+0x40);
1826 - pHba->reply_port = (ulong)(base_addr_virt+0x44);
1827 + pHba->irq_mask = (u32 *)(base_addr_virt+0x30);
1828 + pHba->post_port = (u32 *)(base_addr_virt+0x40);
1829 + pHba->reply_port = (u32 *)(base_addr_virt+0x44);
1830
1831 - pHba->hrt = NULL;
1832 - pHba->lct = NULL;
1833 + pHba->hrt_va = NULL;
1834 + pHba->lct_va = NULL;
1835 pHba->lct_size = 0;
1836 - pHba->status_block = NULL;
1837 + pHba->status_block_va = NULL;
1838 pHba->post_count = 0;
1839 pHba->state = DPTI_STATE_RESET;
1840 - pHba->pDev = pDev;
1841 pHba->devices = NULL;
1842
1843 // Initializing the spinlocks
1844 spin_lock_init(&pHba->state_lock);
1845
1846 if(raptorFlag == 0){
1847 - printk(KERN_INFO"Adaptec I2O RAID controller %d at %lx size=%x irq=%d\n",
1848 + printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n",
1849 hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
1850 } else {
1851 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
1852 - printk(KERN_INFO" BAR0 %lx - size= %x\n",base_addr_virt,hba_map0_area_size);
1853 - printk(KERN_INFO" BAR1 %lx - size= %x\n",msg_addr_virt,hba_map1_area_size);
1854 + printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1855 + printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1856 }
1857
1858 - if (request_irq (pDev->irq, adpt_isr, SA_SHIRQ, pHba->name, pHba)) {
1859 + if (request_irq (pDev->irq, adpt_isr, SA_SHIRQ, pHba->name, (void *)pHba)) {
1860 printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1861 adpt_i2o_delete_hba(pHba);
1862 return -EINVAL;
1863 @@ -1026,17 +1278,17 @@
1864 if(pHba->msg_addr_virt != pHba->base_addr_virt){
1865 iounmap((void*)pHba->msg_addr_virt);
1866 }
1867 - if(pHba->hrt) {
1868 - kfree(pHba->hrt);
1869 + if(pHba->hrt_va) {
1870 + pci_free_consistent(pHba->pDev, le32_to_cpu(pHba->hrt_va->num_entries) * le32_to_cpu(pHba->hrt_va->entry_len) << 2, pHba->hrt_va, pHba->hrt_pa);
1871 }
1872 - if(pHba->lct){
1873 - kfree(pHba->lct);
1874 + if(pHba->lct_va){
1875 + pci_free_consistent(pHba->pDev, pHba->lct_size, pHba->lct_va, pHba->lct_pa);
1876 }
1877 - if(pHba->status_block) {
1878 - kfree(pHba->status_block);
1879 + if(pHba->status_block_va) {
1880 + pci_free_consistent(pHba->pDev, sizeof(i2o_status_block), pHba->status_block_va, pHba->status_block_pa);
1881 }
1882 - if(pHba->reply_pool){
1883 - kfree(pHba->reply_pool);
1884 + if(pHba->reply_pool_va){
1885 + pci_free_consistent(pHba->pDev, pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, pHba->reply_pool_va, pHba->reply_pool_pa);
1886 }
1887
1888 for(d = pHba->devices; d ; d = next){
1889 @@ -1053,9 +1305,23 @@
1890 }
1891 }
1892 }
1893 - kfree(pHba);
1894
1895 + if (pHba->host != NULL) {
1896 + scsi_unregister(pHba->host);
1897 + }
1898 + kfree(pHba);
1899 if(hba_count <= 0){
1900 +# if (defined(__x86_64__))
1901 + unregister_ioctl32_conversion(DPT_SIGNATURE);
1902 + unregister_ioctl32_conversion(I2OUSRCMD);
1903 + unregister_ioctl32_conversion(DPT_CTRLINFO);
1904 + unregister_ioctl32_conversion(DPT_SYSINFO);
1905 + unregister_ioctl32_conversion(DPT_BLINKLED);
1906 + unregister_ioctl32_conversion(I2ORESETCMD);
1907 + unregister_ioctl32_conversion(I2ORESCANCMD);
1908 + unregister_ioctl32_conversion(DPT_TARGET_BUSY & 0xFFFF);
1909 + unregister_ioctl32_conversion(DPT_TARGET_BUSY);
1910 +# endif
1911 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
1912 }
1913 }
1914 @@ -1083,7 +1349,7 @@
1915
1916 if(chan < 0 || chan >= MAX_CHANNEL)
1917 return NULL;
1918 -
1919 +
1920 if( pHba->channel[chan].device == NULL){
1921 printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1922 return NULL;
1923 @@ -1135,7 +1401,7 @@
1924 wait_data->next = adpt_post_wait_queue;
1925 adpt_post_wait_queue = wait_data;
1926 adpt_post_wait_id++;
1927 - adpt_post_wait_id &= 0x7fff;
1928 + adpt_post_wait_id = (adpt_post_wait_id & 0x7fff);
1929 wait_data->id = adpt_post_wait_id;
1930 spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1931
1932 @@ -1144,28 +1410,80 @@
1933
1934 // this code is taken from kernel/sched.c:interruptible_sleep_on_timeout
1935 wait.task = current;
1936 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0))
1937 + write_lock_irqsave(&waitqueue_lock,flags);
1938 + __add_wait_queue(&adpt_wq_i2o_post, &wait);
1939 + write_unlock(&waitqueue_lock);
1940 +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
1941 init_waitqueue_entry(&wait, current);
1942 - spin_lock_irqsave(&adpt_wq_i2o_post.lock, flags);
1943 + spin_lock_irqsave(&adpt_wq_i2o_post.lock,flags);
1944 __add_wait_queue(&adpt_wq_i2o_post, &wait);
1945 spin_unlock(&adpt_wq_i2o_post.lock);
1946 +#else
1947 + init_waitqueue_entry(&wait, current);
1948 + wq_write_lock_irqsave(&adpt_wq_i2o_post.lock,flags);
1949 + __add_wait_queue(&adpt_wq_i2o_post, &wait);
1950 + wq_write_unlock(&adpt_wq_i2o_post.lock);
1951 +#endif
1952
1953 msg[2] |= 0x80000000 | ((u32)wait_data->id);
1954 timeout *= HZ;
1955 if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1956 + spinlock_t * was_locked = (spinlock_t *)NULL;
1957 set_current_state(TASK_INTERRUPTIBLE);
1958 - spin_unlock_irq(pHba->host->host_lock);
1959 - if (!timeout)
1960 + /*
1961 	+ * We are called before the host and host lock have been
1962 	+ * assigned, and may be called with, or without, the host lock
1963 	+ * held. We need to release the lock, if held, before going
1964 + * to sleep.
1965 + */
1966 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
1967 + if ((pHba->host != NULL) /* Sad */
1968 + && (spin_is_locked(pHba->host->host_lock))) {
1969 + was_locked = pHba->host->host_lock;
1970 + spin_unlock_irq(was_locked);
1971 + }
1972 +#else
1973 + if (spin_is_locked(&io_request_lock)) {
1974 + was_locked = &io_request_lock;
1975 + spin_unlock_irq(was_locked);
1976 + }
1977 +#endif
1978 + if(!timeout){
1979 schedule();
1980 - else
1981 - schedule_timeout(timeout*HZ);
1982 - spin_lock_irq(pHba->host->host_lock);
1983 + } else {
1984 + timeout = schedule_timeout(timeout*HZ);
1985 + if (timeout == 0) {
1986 + // I/O issued, but cannot get result in
1987 + // specified time. Freeing resources is
1988 + // dangerous.
1989 + status = -ETIME;
1990 + }
1991 + }
1992 + if (was_locked)
1993 + spin_lock_irq(was_locked);
1994 + if (signal_pending(current)) {
1995 + printk("adpt_i2o_post_wait: interrupted\n");
1996 + status = -EINTR;
1997 + }
1998 }
1999 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0))
2000 + write_lock_irq(&waitqueue_lock);
2001 + __remove_wait_queue(&adpt_wq_i2o_post, &wait);
2002 + write_unlock_irqrestore(&waitqueue_lock,flags);
2003 +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
2004 spin_lock_irq(&adpt_wq_i2o_post.lock);
2005 __remove_wait_queue(&adpt_wq_i2o_post, &wait);
2006 - spin_unlock_irqrestore(&adpt_wq_i2o_post.lock, flags);
2007 + spin_unlock_irqrestore(&adpt_wq_i2o_post.lock,flags);
2008 +#else
2009 + wq_write_lock_irq(&adpt_wq_i2o_post.lock);
2010 + __remove_wait_queue(&adpt_wq_i2o_post, &wait);
2011 + wq_write_unlock_irqrestore(&adpt_wq_i2o_post.lock,flags);
2012 +#endif
2013
2014 - if(status == -ETIMEDOUT){
2015 - printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
2016 + if(status == -ETIMEDOUT || status == -ETIME || status == -EINTR){
2017 + printk(KERN_INFO"dpti%d: POST WAIT FAILED (%d)\n",
2018 + pHba->unit, status);
2019 // We will have to free the wait_data memory during shutdown
2020 return status;
2021 }
2022 @@ -1252,7 +1570,7 @@
2023 }
2024 }
2025 spin_unlock(&adpt_post_wait_lock);
2026 - // If this happens we lose commands that probably really completed
2027 	+ // If this happens we lose commands that probably really completed
2028 printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
2029 printk(KERN_DEBUG" Tasks in wait queue:\n");
2030 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
2031 @@ -1265,6 +1583,8 @@
2032 {
2033 u32 msg[8];
2034 u8* status;
2035 + dma_addr_t addr;
2036 + u64 addr64;
2037 u32 m = EMPTY_QUEUE ;
2038 ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
2039
2040 @@ -1277,16 +1597,16 @@
2041 do {
2042 rmb();
2043 m = readl(pHba->post_port);
2044 - if (m != EMPTY_QUEUE) {
2045 + if (m != cpu_to_le32(EMPTY_QUEUE)) {
2046 break;
2047 }
2048 if(time_after(jiffies,timeout)){
2049 printk(KERN_WARNING"Timeout waiting for message!\n");
2050 return -ETIMEDOUT;
2051 }
2052 - } while (m == EMPTY_QUEUE);
2053 + } while (m == cpu_to_le32(EMPTY_QUEUE));
2054
2055 - status = (u8*)kmalloc(4, GFP_KERNEL|ADDR32);
2056 + status = (u8*)pci_alloc_consistent(pHba->pDev, 4, &addr);
2057 if(status == NULL) {
2058 adpt_send_nop(pHba, m);
2059 printk(KERN_ERR"IOP reset failed - no free memory.\n");
2060 @@ -1294,16 +1614,17 @@
2061 }
2062 memset(status,0,4);
2063
2064 - msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
2065 - msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
2066 + msg[0]=cpu_to_le32(EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0);
2067 + msg[1]=cpu_to_le32(I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID);
2068 msg[2]=0;
2069 msg[3]=0;
2070 msg[4]=0;
2071 msg[5]=0;
2072 - msg[6]=virt_to_bus(status);
2073 - msg[7]=0;
2074 + addr64 = cpu_to_le64(addr);
2075 + msg[6]=(u32)addr64;
2076 + msg[7]=(u32)(addr64 >> 32);
2077
2078 - memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
2079 + memcpy_toio(pHba->msg_addr_virt+le32_to_cpu(m), msg, sizeof(msg));
2080 wmb();
2081 writel(m, pHba->post_port);
2082 wmb();
2083 @@ -1311,40 +1632,47 @@
2084 while(*status == 0){
2085 if(time_after(jiffies,timeout)){
2086 printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
2087 - kfree(status);
2088 	+ /* We lose 4 bytes of "status" here, but we cannot
2089 + free these because controller may awake and corrupt
2090 + those bytes at any time */
2091 + /* pci_free_consistent(pHba->pDev, 4, buf, addr); */
2092 return -ETIMEDOUT;
2093 }
2094 rmb();
2095 }
2096
2097 - if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
2098 + if(*status == cpu_to_le32(0x01) /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
2099 PDEBUG("%s: Reset in progress...\n", pHba->name);
2100 // Here we wait for message frame to become available
2101 // indicated that reset has finished
2102 do {
2103 rmb();
2104 m = readl(pHba->post_port);
2105 - if (m != EMPTY_QUEUE) {
2106 + if (m != cpu_to_le32(EMPTY_QUEUE)) {
2107 break;
2108 }
2109 if(time_after(jiffies,timeout)){
2110 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
2111 	+ /* We lose 4 bytes of "status" here, but we
2112 + cannot free these because controller may
2113 + awake and corrupt those bytes at any time */
2114 + /* pci_free_consistent(pHba->pDev, 4, buf, addr); */
2115 return -ETIMEDOUT;
2116 }
2117 - } while (m == EMPTY_QUEUE);
2118 + } while (m == cpu_to_le32(EMPTY_QUEUE));
2119 // Flush the offset
2120 adpt_send_nop(pHba, m);
2121 }
2122 adpt_i2o_status_get(pHba);
2123 - if(*status == 0x02 ||
2124 - pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2125 + if(*status == cpu_to_le32(0x02) ||
2126 + pHba->status_block_va->iop_state != cpu_to_le32(ADAPTER_STATE_RESET)) {
2127 printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
2128 pHba->name);
2129 } else {
2130 PDEBUG("%s: Reset completed.\n", pHba->name);
2131 }
2132
2133 - kfree(status);
2134 + pci_free_consistent(pHba->pDev, 4, status, addr);
2135 #ifdef UARTDELAY
2136 // This delay is to allow someone attached to the card through the debug UART to
2137 // set up the dump levels that they want before the rest of the initialization sequence
2138 @@ -1360,7 +1688,7 @@
2139 int max;
2140 int tid;
2141 struct i2o_device *d;
2142 - i2o_lct *lct = pHba->lct;
2143 + i2o_lct *lct = pHba->lct_va;
2144 u8 bus_no = 0;
2145 s16 scsi_id;
2146 s16 scsi_lun;
2147 @@ -1377,7 +1705,7 @@
2148 max /= 9;
2149
2150 for(i=0;i<max;i++) {
2151 - if( lct->lct_entry[i].user_tid != 0xfff){
2152 + if( lct->lct_entry[i].user_tid != cpu_to_le32(0xfff)){
2153 /*
2154 * If we have hidden devices, we need to inform the upper layers about
2155 * the possible maximum id reference to handle device access when
2156 @@ -1385,12 +1713,12 @@
2157 * allow us future access to devices that are currently hidden
2158 * behind arrays, hotspares or have not been configured (JBOD mode).
2159 */
2160 - if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
2161 - lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
2162 - lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2163 + if( lct->lct_entry[i].class_id != cpu_to_le32(I2O_CLASS_RANDOM_BLOCK_STORAGE) &&
2164 + lct->lct_entry[i].class_id != cpu_to_le32(I2O_CLASS_SCSI_PERIPHERAL) &&
2165 + lct->lct_entry[i].class_id != cpu_to_le32(I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) ){
2166 continue;
2167 }
2168 - tid = lct->lct_entry[i].tid;
2169 + tid = le32_to_cpu(lct->lct_entry[i].tid);
2170 // I2O_DPT_DEVICE_INFO_GROUP_NO;
2171 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2172 continue;
2173 @@ -1402,7 +1730,7 @@
2174 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
2175 continue;
2176 }
2177 - if (scsi_id >= MAX_ID){
2178 + if(scsi_id > MAX_ID){
2179 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
2180 continue;
2181 }
2182 @@ -1430,21 +1758,21 @@
2183 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2184
2185 d->flags = 0;
2186 - tid = d->lct_data.tid;
2187 + tid = le32_to_cpu(d->lct_data.tid);
2188 adpt_i2o_report_hba_unit(pHba, d);
2189 adpt_i2o_install_device(pHba, d);
2190 }
2191 bus_no = 0;
2192 for(d = pHba->devices; d ; d = d->next) {
2193 - if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
2194 - d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
2195 - tid = d->lct_data.tid;
2196 + if(d->lct_data.class_id == cpu_to_le32(I2O_CLASS_BUS_ADAPTER_PORT) ||
2197 + d->lct_data.class_id == cpu_to_le32(I2O_CLASS_FIBRE_CHANNEL_PORT)){
2198 + tid = le32_to_cpu(d->lct_data.tid);
2199 // TODO get the bus_no from hrt-but for now they are in order
2200 //bus_no =
2201 if(bus_no > pHba->top_scsi_channel){
2202 pHba->top_scsi_channel = bus_no;
2203 }
2204 - pHba->channel[bus_no].type = d->lct_data.class_id;
2205 + pHba->channel[bus_no].type = le32_to_cpu(d->lct_data.class_id);
2206 pHba->channel[bus_no].tid = tid;
2207 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
2208 {
2209 @@ -1462,11 +1790,11 @@
2210
2211 // Setup adpt_device table
2212 for(d = pHba->devices; d ; d = d->next) {
2213 - if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2214 - d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2215 - d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2216 + if(d->lct_data.class_id == cpu_to_le32(I2O_CLASS_RANDOM_BLOCK_STORAGE) ||
2217 + d->lct_data.class_id == cpu_to_le32(I2O_CLASS_SCSI_PERIPHERAL) ||
2218 + d->lct_data.class_id == cpu_to_le32(I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) ){
2219
2220 - tid = d->lct_data.tid;
2221 + tid = le32_to_cpu(d->lct_data.tid);
2222 scsi_id = -1;
2223 // I2O_DPT_DEVICE_INFO_GROUP_NO;
2224 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
2225 @@ -1476,7 +1804,7 @@
2226 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
2227 continue;
2228 }
2229 - if (scsi_id >= MAX_ID) {
2230 + if(scsi_id > MAX_ID){
2231 continue;
2232 }
2233 if( pHba->channel[bus_no].device[scsi_id] == NULL){
2234 @@ -1567,7 +1895,7 @@
2235 }
2236
2237 // if(pHba->in_use){
2238 - // up(&adpt_configuration_lock);
2239 +// up(&adpt_configuration_lock);
2240 // return -EBUSY;
2241 // }
2242
2243 @@ -1602,7 +1930,6 @@
2244 return 0;
2245 }
2246
2247 -
2248 static int adpt_i2o_passthru(adpt_hba* pHba, u32* arg)
2249 {
2250 u32 msg[MAX_MESSAGE_SIZE];
2251 @@ -1611,13 +1938,14 @@
2252 u32 reply_size = 0;
2253 u32* user_msg = (u32*)arg;
2254 u32* user_reply = NULL;
2255 - ulong sg_list[pHba->sg_tablesize];
2256 + void * sg_list[pHba->sg_tablesize];
2257 u32 sg_offset = 0;
2258 u32 sg_count = 0;
2259 int sg_index = 0;
2260 u32 i = 0;
2261 u32 rcode = 0;
2262 - ulong p = 0;
2263 + void * p = 0;
2264 + dma_addr_t addr;
2265 ulong flags = 0;
2266
2267 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
2268 @@ -1651,10 +1979,34 @@
2269 memset(reply,0,REPLY_FRAME_SIZE*4);
2270 sg_offset = (msg[0]>>4)&0xf;
2271 msg[2] = 0x40000000; // IOCTL context
2272 - msg[3] = (u32)reply;
2273 + if (sizeof(reply) > sizeof(u32)) {
2274 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
2275 + spin_lock_irqsave(pHba->host->host_lock, flags);
2276 +#else
2277 + spin_lock_irqsave(&io_request_lock, flags);
2278 +#endif
2279 + for (i = 0; i < (sizeof(pHba->ioctl_reply_context) / sizeof(pHba->ioctl_reply_context[0])); ++i) {
2280 + if (pHba->ioctl_reply_context[i] == NULL) {
2281 + pHba->ioctl_reply_context[i] = reply;
2282 + break;
2283 + }
2284 + }
2285 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
2286 + spin_unlock_irqrestore(pHba->host->host_lock, flags);
2287 +#else
2288 + spin_unlock_irqrestore(&io_request_lock, flags);
2289 +#endif
2290 + if (i >= (sizeof(pHba->ioctl_reply_context) / sizeof(pHba->ioctl_reply_context[0]))) {
2291 + kfree (reply);
2292 + printk(KERN_WARNING"%s: Too many outstanding ioctl commands\n",pHba->name);
2293 + return -EBUSY;
2294 + }
2295 + msg[3] = i;
2296 + } else
2297 + msg[3] = (u32)reply; // EVIL, not 64 bit safe
2298 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
2299 if(sg_offset) {
2300 - // TODO 64bit fix
2301 + // TODO 64 bit fix ?
2302 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
2303 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
2304 if (sg_count > pHba->sg_tablesize){
2305 @@ -1673,7 +2025,7 @@
2306 }
2307 sg_size = sg[i].flag_count & 0xffffff;
2308 /* Allocate memory for the transfer */
2309 - p = (ulong)kmalloc(sg_size, GFP_KERNEL|ADDR32);
2310 + p = pci_alloc_consistent(pHba->pDev, sg_size, &addr);
2311 if(p == 0) {
2312 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
2313 pHba->name,sg_size,i,sg_count);
2314 @@ -1681,9 +2033,15 @@
2315 goto cleanup;
2316 }
2317 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
2318 + if( (sizeof(dma_addr_t) > 4) && (pHba->pae_support) && (((u64)addr >> 32) != 0) ) {
2319 	+ printk(KERN_DEBUG"%s: Could not allocate SG buffer in 32 bit address space - size = %d buffer number %d of %d\n",
2320 + pHba->name,sg_size,i,sg_count);
2321 + rcode = -ENOMEM;
2322 + goto cleanup;
2323 + }
2324 /* Copy in the user's SG buffer if necessary */
2325 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
2326 - // TODO 64bit fix
2327 + // TODO 64 bit fix?
2328 if (copy_from_user((void*)p,(void*)sg[i].addr_bus, sg_size)) {
2329 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
2330 rcode = -EFAULT;
2331 @@ -1691,12 +2049,16 @@
2332 }
2333 }
2334 //TODO 64bit fix
2335 - sg[i].addr_bus = (u32)virt_to_bus((void*)p);
2336 + sg[i].addr_bus = cpu_to_le32(addr);
2337 }
2338 }
2339
2340 do {
2341 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
2342 spin_lock_irqsave(pHba->host->host_lock, flags);
2343 +#else
2344 + spin_lock_irqsave(&io_request_lock, flags);
2345 +#endif
2346 // This state stops any new commands from enterring the
2347 // controller while processing the ioctl
2348 // pHba->state |= DPTI_STATE_IOCTL;
2349 @@ -1704,7 +2066,11 @@
2350 // the queue empties and stops. We need a way to restart the queue
2351 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
2352 // pHba->state &= ~DPTI_STATE_IOCTL;
2353 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
2354 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2355 +#else
2356 + spin_unlock_irqrestore(&io_request_lock, flags);
2357 +#endif
2358 } while(rcode == -ETIMEDOUT);
2359
2360 if(rcode){
2361 @@ -1741,8 +2107,8 @@
2362 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
2363 sg_size = sg[j].flag_count & 0xffffff;
2364 // TODO 64bit fix
2365 - if (copy_to_user((void*)sg[j].addr_bus,(void*)sg_list[j], sg_size)) {
2366 - printk(KERN_WARNING"%s: Could not copy %lx TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
2367 + if (copy_to_user((void*)sg[j].addr_bus,sg_list[j], sg_size)) {
2368 + printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
2369 rcode = -EFAULT;
2370 goto cleanup;
2371 }
2372 @@ -1765,10 +2131,13 @@
2373
2374
2375 cleanup:
2376 - kfree (reply);
2377 - while(sg_index) {
2378 - if(sg_list[--sg_index]) {
2379 - kfree((void*)(sg_list[sg_index]));
2380 + if (rcode != -ETIME && rcode != -EINTR) {
2381 + struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
2382 + kfree (reply);
2383 + while(sg_index) {
2384 + if(sg_list[--sg_index]) {
2385 + pci_free_consistent(pHba->pDev, sg[sg_index].flag_count & 0xffffff, sg_list[sg_index], le32_to_cpu(sg[sg_index].addr_bus));
2386 + }
2387 }
2388 }
2389 return rcode;
2390 @@ -1796,11 +2165,11 @@
2391
2392 #if defined __i386__
2393 adpt_i386_info(&si);
2394 -#elif defined (__ia64__)
2395 +#elif defined __ia64__
2396 adpt_ia64_info(&si);
2397 -#elif defined(__sparc__)
2398 +#elif defined __sparc__
2399 adpt_sparc_info(&si);
2400 -#elif defined (__alpha__)
2401 +#elif defined __alpha__
2402 adpt_alpha_info(&si);
2403 #else
2404 si.processorType = 0xff ;
2405 @@ -1819,7 +2188,7 @@
2406 // This is all the info we need for now
2407 // We will add more info as our new
2408 // managmenent utility requires it
2409 - si->processorType = PROC_IA64;
2410 + si->processorType = PROC_ITANIUM;
2411 }
2412 #endif
2413
2414 @@ -1894,7 +2263,7 @@
2415 }
2416
2417 while((volatile u32) pHba->state & DPTI_STATE_RESET ) {
2418 - set_task_state(current,TASK_UNINTERRUPTIBLE);
2419 + set_current_state(TASK_UNINTERRUPTIBLE);
2420 schedule_timeout(2);
2421
2422 }
2423 @@ -1942,13 +2311,51 @@
2424 break;
2425 }
2426 case I2ORESETCMD:
2427 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
2428 spin_lock_irqsave(pHba->host->host_lock, flags);
2429 +#else
2430 + spin_lock_irqsave(&io_request_lock, flags);
2431 +#endif
2432 adpt_hba_reset(pHba);
2433 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
2434 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2435 +#else
2436 + spin_unlock_irqrestore(&io_request_lock, flags);
2437 +#endif
2438 break;
2439 case I2ORESCANCMD:
2440 adpt_rescan(pHba);
2441 break;
2442 + case DPT_TARGET_BUSY & 0xFFFF:
2443 + case DPT_TARGET_BUSY:
2444 + {
2445 + TARGET_BUSY_T busy;
2446 + struct adpt_device* d;
2447 +
2448 + if (copy_from_user((void*)&busy, (void*)arg, sizeof(TARGET_BUSY_T))) {
2449 + return -EFAULT;
2450 + }
2451 +
2452 + d = adpt_find_device(pHba, busy.channel, busy.id, busy.lun);
2453 + if(d == NULL){
2454 + return -ENODEV;
2455 + }
2456 + busy.isBusy = ((d->pScsi_dev)
2457 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
2458 + && (atomic_read(&d->pScsi_dev->access_count)
2459 + || test_bit(SHOST_RECOVERY, &pHba->host->shost_state)));
2460 +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
2461 + && (d->pScsi_dev->device_busy /* Imperfect */
2462 + || test_bit(SHOST_RECOVERY, &pHba->host->shost_state)));
2463 +#else
2464 + && (d->pScsi_dev->access_count
2465 + || pHba->host->in_recovery));
2466 +#endif
2467 + if (copy_to_user ((char*)arg, &busy, sizeof(busy))) {
2468 + return -EFAULT;
2469 + }
2470 + break;
2471 + }
2472 default:
2473 return -EINVAL;
2474 }
2475 @@ -1956,77 +2363,145 @@
2476 return error;
2477 }
2478
2479 +static inline Scsi_Cmnd * adpt_cmd_from_context(adpt_hba * pHba, u32 context)
2480 +{
2481 + Scsi_Cmnd * cmd;
2482
2483 -static void adpt_isr(int irq, void *dev_id, struct pt_regs *regs)
2484 + if (context == 0)
2485 + return NULL;
2486 + if (sizeof(cmd) > sizeof(u32)) {
2487 + Scsi_Device * d;
2488 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
2489 +# if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
2490 + shost_for_each_device(d, pHba->host) {
2491 +# else
2492 + list_for_each_entry(d, &pHba->host->my_devices, siblings) {
2493 +# endif
2494 + unsigned long flags;
2495 + spin_lock_irqsave(&d->list_lock, flags);
2496 + list_for_each_entry(cmd, &d->cmd_list, list) {
2497 + if (((u32)cmd->serial_number == context)
2498 + || ((u32)cmd->serial_number_at_timeout == context)) {
2499 + spin_unlock_irqrestore(&d->list_lock, flags);
2500 + return cmd;
2501 + }
2502 + }
2503 + spin_unlock_irqrestore(&d->list_lock, flags);
2504 + }
2505 +#else
2506 + d = pHba->host->host_queue;
2507 + while (d) {
2508 + for (cmd = d->device_queue; cmd ; cmd = cmd->next)
2509 + if (((u32)cmd->serial_number == context)
2510 + || ((u32)cmd->serial_number_at_timeout == context))
2511 + return cmd;
2512 + d = d->next;
2513 + }
2514 +#endif
2515 + } else
2516 + return (Scsi_Cmnd*) context; /* 64 bit! */
2517 + return NULL;
2518 +}
2519 +
2520 +static irqreturn_t adpt_isr(int irq, void *dev_id, struct pt_regs *regs)
2521 {
2522 Scsi_Cmnd* cmd;
2523 adpt_hba* pHba = dev_id;
2524 u32 m;
2525 - ulong reply;
2526 + u8 * reply = (u8 *)-1L;
2527 u32 status=0;
2528 u32 context;
2529 ulong flags = 0;
2530
2531 if (pHba == NULL ){
2532 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2533 - return;
2534 + return IRQ_NONE;
2535 }
2536 - spin_lock_irqsave(pHba->host->host_lock, flags);
2537 - while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2538 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
2539 + if (pHba->host != NULL) /* Sad */
2540 + spin_lock_irqsave(pHba->host->host_lock, flags);
2541 +#else
2542 + spin_lock_irqsave(&io_request_lock, flags);
2543 +#endif
2544 + while( readl(pHba->irq_mask) & cpu_to_le32(I2O_INTERRUPT_PENDING_B)) {
2545 m = readl(pHba->reply_port);
2546 - if(m == EMPTY_QUEUE){
2547 + if(m == cpu_to_le32(EMPTY_QUEUE)){
2548 // Try twice then give up
2549 rmb();
2550 m = readl(pHba->reply_port);
2551 - if(m == EMPTY_QUEUE){
2552 + if(m == cpu_to_le32(EMPTY_QUEUE)){
2553 // This really should not happen
2554 printk(KERN_ERR"dpti: Could not get reply frame\n");
2555 goto out;
2556 }
2557 }
2558 - reply = (ulong)bus_to_virt(m);
2559 + if ((pHba->reply_pool_pa <= le32_to_cpu(m))
2560 + && (le32_to_cpu(m) < (pHba->reply_pool_pa + (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)))) {
2561 + reply = ((u8 *)pHba->reply_pool_va) + (le32_to_cpu(m) - pHba->reply_pool_pa);
2562 + } else {
2563 + /* Ick, we should *never* be here */
2564 	+ printk(KERN_ERR"dpti: reply frame not from pool\n");
2565 + reply = (u8 *)bus_to_virt(le32_to_cpu(m));
2566 + }
2567
2568 - if (readl(reply) & MSG_FAIL) {
2569 + if (readl(reply) & cpu_to_le32(MSG_FAIL)) {
2570 u32 old_m = readl(reply+28);
2571 - ulong msg;
2572 + char * msg;
2573 u32 old_context;
2574 PDEBUG("%s: Failed message\n",pHba->name);
2575 - if(old_m >= 0x100000){
2576 + if(le32_to_cpu(old_m) >= 0x100000){
2577 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2578 writel(m,pHba->reply_port);
2579 continue;
2580 }
2581 // Transaction context is 0 in failed reply frame
2582 - msg = (ulong)(pHba->msg_addr_virt + old_m);
2583 + msg = pHba->msg_addr_virt + le32_to_cpu(old_m);
2584 old_context = readl(msg+12);
2585 writel(old_context, reply+12);
2586 adpt_send_nop(pHba, old_m);
2587 }
2588 context = readl(reply+8);
2589 if(context & 0x40000000){ // IOCTL
2590 - ulong p = (ulong)(readl(reply+12));
2591 + u32 context = readl(reply+12);
2592 + void * p;
2593 + if (sizeof(reply) > sizeof(u32)) {
2594 + p = pHba->ioctl_reply_context[context];
2595 + pHba->ioctl_reply_context[context] = NULL;
2596 + } else
2597 + p = (void *)(readl(reply+12));
2598 if( p != 0) {
2599 - memcpy((void*)p, (void*)reply, REPLY_FRAME_SIZE * 4);
2600 + memcpy_fromio(p, (void*)reply, REPLY_FRAME_SIZE * 4);
2601 }
2602 // All IOCTLs will also be post wait
2603 }
2604 if(context & 0x80000000){ // Post wait message
2605 - status = readl(reply+16);
2606 + status = le32_to_cpu(readl(reply+16));
2607 if(status >> 24){
2608 status &= 0xffff; /* Get detail status */
2609 } else {
2610 status = I2O_POST_WAIT_OK;
2611 }
2612 if(!(context & 0x40000000)) {
2613 - cmd = (Scsi_Cmnd*) readl(reply+12);
2614 + cmd = adpt_cmd_from_context (pHba, readl(reply+12));
2615 if(cmd != NULL) {
2616 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2617 }
2618 }
2619 adpt_i2o_post_wait_complete(context, status);
2620 } else { // SCSI message
2621 - cmd = (Scsi_Cmnd*) readl(reply+12);
2622 + cmd = adpt_cmd_from_context (pHba, readl(reply+12));
2623 if(cmd != NULL){
2624 + if(cmd->use_sg)
2625 + pci_unmap_sg(pHba->pDev,
2626 + (struct scatterlist *)cmd->buffer,
2627 + cmd->use_sg,
2628 + scsi_to_pci_dma_dir(cmd->sc_data_direction));
2629 + else if(cmd->request_bufflen)
2630 + pci_unmap_single(pHba->pDev,
2631 + cmd->SCp.dma_handle,
2632 + cmd->request_bufflen,
2633 + scsi_to_pci_dma_dir(cmd->sc_data_direction));
2634 +
2635 if(cmd->serial_number != 0) { // If not timedout
2636 adpt_i2o_to_scsi(reply, cmd);
2637 }
2638 @@ -2036,8 +2511,28 @@
2639 wmb();
2640 rmb();
2641 }
2642 -out: spin_unlock_irqrestore(pHba->host->host_lock, flags);
2643 +out:
2644 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
2645 + if (pHba->host != NULL) /* Sad */
2646 + spin_unlock_irqrestore(pHba->host->host_lock, flags);
2647 +#else
2648 + spin_unlock_irqrestore(&io_request_lock, flags);
2649 +#endif
2650 + if (reply == (u8 *)-1) {
2651 + return IRQ_NONE;
2652 + }
2653 + return IRQ_HANDLED;
2654 +
2655 }
2656 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0))
2657 +/*
2658 + * dpti2oscsi2.c contains a table of scsi commands that is used to determine
2659 + * the data direction of the command. It is used in dpt_scsi_to_i2o to speed
2660 + * up the building of the scsi message.
2661 + */
2662 +#include "dpti2oscsi2.c"
2663 +#endif
2664 +
2665
2666 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, Scsi_Cmnd* cmd, struct adpt_device* d)
2667 {
2668 @@ -2051,6 +2546,10 @@
2669 u32 reqlen;
2670 s32 rcode;
2671
2672 +#if 0
2673 +printk (KERN_INFO"adpt_scsi_to_i2o(%p,%p,%p)\n", pHba, cmd, d);
2674 +adpt_sleep();
2675 +#endif
2676 memset(msg, 0 , sizeof(msg));
2677 len = cmd->request_bufflen;
2678 direction = 0x00000000;
2679 @@ -2063,6 +2562,34 @@
2680 * Note: Do not have to verify index is less than 0 since
2681 * cmd->cmnd[0] is an unsigned char
2682 */
2683 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0))
2684 + if (cmd->cmnd[0] < DISKXFERTBLSIZE) {
2685 + switch (i2oscsi2diskxfer[cmd->cmnd[0]]) {
2686 + case DATAIN:
2687 + scsidir =0x40000000; // DATA IN (iop<--dev)
2688 + break;
2689 + case DATAOUT:
2690 + direction=0x04000000; // SGL OUT
2691 + scsidir =0x80000000; // DATA OUT (iop-->dev)
2692 + break;
2693 + case NODATA:
2694 + break;
2695 + case NOSUPPORT:
2696 + scsidir =0x40000000; // DATA IN (iop<--dev)
2697 + // Assume In - and continue;
2698 + break;
2699 + default:
2700 + printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2701 + pHba->name, cmd->cmnd[0]);
2702 + cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2703 + cmd->scsi_done(cmd);
2704 + return 0;
2705 + }
2706 + } else {
2707 + printk(KERN_WARNING"%s: cmd->cmnd[0] = %d is greater than table size, which is %d\n",
2708 + pHba->name, cmd->cmnd[0], DISKXFERTBLSIZE);
2709 + }
2710 +#else
2711 switch(cmd->sc_data_direction){
2712 case SCSI_DATA_READ:
2713 scsidir =0x40000000; // DATA IN (iop<--dev)
2714 @@ -2084,21 +2611,25 @@
2715 cmd->scsi_done(cmd);
2716 return 0;
2717 }
2718 +#endif
2719 }
2720 // msg[0] is set later
2721 // I2O_CMD_SCSI_EXEC
2722 - msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2723 + msg[1] = cpu_to_le32((0xff<<24)|(HOST_TID<<12)|d->tid);
2724 msg[2] = 0;
2725 - msg[3] = (u32)cmd; /* We want the SCSI control block back */
2726 + if (sizeof(cmd) > sizeof(u32))
2727 + msg[3] = (u32)cmd->serial_number;
2728 + else
2729 + msg[3] = (u32)cmd; /* EVIL 64 bit We want the SCSI control block back */
2730 // Our cards use the transaction context as the tag for queueing
2731 // Adaptec/DPT Private stuff
2732 - msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2733 - msg[5] = d->tid;
2734 + msg[4] = cpu_to_le32(I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16));
2735 + msg[5] = cpu_to_le32(d->tid);
2736 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2737 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2738 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2739 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2740 - msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2741 + msg[6] = cpu_to_le32(scsidir|0x20a00000|cmd->cmd_len);
2742
2743 mptr=msg+7;
2744
2745 @@ -2108,37 +2639,102 @@
2746 mptr+=4;
2747 lenptr=mptr++; /* Remember me - fill in when we know */
2748 reqlen = 14; // SINGLE SGE
2749 + /* The following test gets optimized out if dma_addr_t is <= 32 bits */
2750 + if( (sizeof(dma_addr_t) > 4) && (pHba->pae_support)) {
2751 + *mptr++ = cpu_to_le32((0x7C<<24)+(2<<16)+0x02); /* Enable 64 bit */
2752 + *mptr++ = cpu_to_le32(1 << PAGE_SHIFT);
2753 + reqlen += 2;
2754 + }
2755 /* Now fill in the SGList and command */
2756 if(cmd->use_sg) {
2757 struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
2758 + int sg_count = pci_map_sg(pHba->pDev, sg, cmd->use_sg,
2759 + scsi_to_pci_dma_dir(cmd->sc_data_direction));
2760 len = 0;
2761 - for(i = 0 ; i < cmd->use_sg; i++) {
2762 - *mptr++ = direction|0x10000000|sg->length;
2763 - len+=sg->length;
2764 - *mptr++ = virt_to_bus(sg->address);
2765 - sg++;
2766 + if( (sizeof(dma_addr_t) > 4) && (pHba->pae_support)) {
2767 + for(i = 0 ; i < sg_count; i++) {
2768 + dma_addr_t addr;
2769 + *mptr++ = cpu_to_le32(direction|0x10000000|sg_dma_len(sg));
2770 + len+=sg_dma_len(sg);
2771 + addr = sg_dma_address(sg);
2772 + *mptr++ = cpu_to_le32(addr);
2773 + *mptr++ = cpu_to_le32((u64)addr >> 32);
2774 + sg++;
2775 + }
2776 + /* Make this an end of list */
2777 + mptr[-3] = cpu_to_le32(direction|0xD0000000|sg_dma_len(sg-1));
2778 +#if 0
2779 +reqlen = mptr - msg;
2780 +*lenptr = cpu_to_le32(len);
2781 +msg[0] = cpu_to_le32(reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0));
2782 +printk(KERN_INFO"Message64=");
2783 +{int i;
2784 +for (i=0; i<reqlen; ++i)
2785 +printk("%c%08x", (i?' ':'{'), msg[i]);
2786 +}
2787 +printk("}\n");
2788 +adpt_sleep();
2789 +#endif
2790 + } else {
2791 + for(i = 0 ; i < sg_count; i++) {
2792 + *mptr++ = cpu_to_le32(direction|0x10000000|sg_dma_len(sg));
2793 + len+=sg_dma_len(sg);
2794 + *mptr++ = cpu_to_le32(sg_dma_address(sg));
2795 + sg++;
2796 + }
2797 + /* Make this an end of list */
2798 + mptr[-2] = cpu_to_le32(direction|0xD0000000|sg_dma_len(sg-1));
2799 +#if 0
2800 +reqlen = mptr - msg;
2801 +*lenptr = cpu_to_le32(len);
2802 +msg[0] = cpu_to_le32(reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0));
2803 +printk(KERN_INFO"Message32=");
2804 +{int i;
2805 +for (i=0; i<reqlen; ++i)
2806 +printk("%c%08x", (i?' ':'{'), msg[i]);
2807 +}
2808 +printk("}\n");
2809 +adpt_sleep();
2810 +#endif
2811 }
2812 - /* Make this an end of list */
2813 - mptr[-2] = direction|0xD0000000|(sg-1)->length;
2814 reqlen = mptr - msg;
2815 - *lenptr = len;
2816 + *lenptr = cpu_to_le32(len);
2817
2818 if(cmd->underflow && len != cmd->underflow){
2819 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2820 len, cmd->underflow);
2821 }
2822 } else {
2823 - *lenptr = len = cmd->request_bufflen;
2824 + len = cmd->request_bufflen;
2825 +
2826 + *lenptr = cpu_to_le32(len);
2827 if(len == 0) {
2828 reqlen = 12;
2829 } else {
2830 - *mptr++ = 0xD0000000|direction|cmd->request_bufflen;
2831 - *mptr++ = virt_to_bus(cmd->request_buffer);
2832 - }
2833 + *mptr++ = cpu_to_le32(0xD0000000|direction|cmd->request_bufflen);
2834 + cmd->SCp.dma_handle = pci_map_single(pHba->pDev,
2835 + cmd->request_buffer,
2836 + len, scsi_to_pci_dma_dir(cmd->sc_data_direction));
2837 + *mptr++ = cpu_to_le32(cmd->SCp.dma_handle);
2838 + if( (sizeof(dma_addr_t) > 4) && (pHba->pae_support)) {
2839 + *mptr++ = cpu_to_le32((u64)cmd->SCp.dma_handle >> 32);
2840 + ++reqlen;
2841 + }
2842 + }
2843 +#if 0
2844 +msg[0] = cpu_to_le32(reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0));
2845 +printk(KERN_INFO"Message=");
2846 +{int i;
2847 +for (i=0; i<reqlen; ++i)
2848 +printk("%c%08x", (i?' ':'{'), msg[i]);
2849 +}
2850 +printk("}\n");
2851 +adpt_sleep();
2852 +#endif
2853 }
2854
2855 /* Stick the headers on */
2856 - msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2857 + msg[0] = cpu_to_le32(reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0));
2858
2859 // Send it on it's way
2860 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2861 @@ -2161,7 +2757,22 @@
2862 (adpt_hba*)(host->hostdata[0]) = pHba;
2863 pHba->host = host;
2864
2865 - host->irq = pHba->pDev->irq;;
2866 + /*
2867 	+ * Only enable PAE mode if dma_addr_t is wider than 32 bits,
2868 	+ * and we have more memory than 32 bit addressing
2869 	+ * can reach.
2870 + */
2871 + if( (sizeof(dma_addr_t) > 4)
2872 + && (num_physpages > (0xFFFFFFFFULL >> PAGE_SHIFT))) {
2873 + pHba->pae_support = 1;
2874 + pci_set_dma_mask(pHba->pDev, (dma_addr_t)0xFFFFFFFFFFFFFFFFULL);
2875 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,18)) && defined(CONFIG_HIGHMEM) && ((LINUX_VERSION_CODE != KERNEL_VERSION(2,4,19)) || defined(CONFIG_HIGHIO))
2876 +#if 0
2877 + host->highmem_io = 1;
2878 +#endif
2879 +#endif
2880 + }
2881 + host->irq = pHba->pDev->irq;
2882 /* no IO ports, so don't have to set host->io_port and
2883 * host->n_io_port
2884 */
2885 @@ -2171,8 +2782,8 @@
2886 host->max_id = 16;
2887 host->max_lun = 256;
2888 host->max_channel = pHba->top_scsi_channel + 1;
2889 - host->cmd_per_lun = 1;
2890 - host->unique_id = (uint) pHba;
2891 + host->cmd_per_lun = 256;
2892 + host->unique_id = (uint) pHba; /* 64 bit */
2893 host->sg_tablesize = pHba->sg_tablesize;
2894 host->can_queue = pHba->post_fifo_size;
2895
2896 @@ -2180,21 +2791,26 @@
2897 }
2898
2899
2900 -static s32 adpt_i2o_to_scsi(ulong reply, Scsi_Cmnd* cmd)
2901 +static s32 adpt_i2o_to_scsi(u8 * reply, Scsi_Cmnd* cmd)
2902 {
2903 adpt_hba* pHba;
2904 u32 hba_status;
2905 u32 dev_status;
2906 - u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2907 + u32 reply_flags = le32_to_cpu(readl(reply)) & 0xff00; // Leave it shifted up 8 bits
2908 // I know this would look cleaner if I just read bytes
2909 // but the model I have been using for all the rest of the
2910 // io is in 4 byte words - so I keep that model
2911 - u16 detailed_status = readl(reply+16) &0xffff;
2912 + u16 detailed_status = le32_to_cpu(readl(reply+16)) &0xffff;
2913 dev_status = (detailed_status & 0xff);
2914 hba_status = detailed_status >> 8;
2915 +//if (hba_status == 1) {
2916 +// printk ("ReplyFrame=%08x %08x %08x %08x %08x %08x\n", le32_to_cpu(readl(reply)), le32_to_cpu(readl(reply+4)), le32_to_cpu(readl(reply+8)), le32_to_cpu(readl(reply+12)), le32_to_cpu(readl(reply+16)), le32_to_cpu(readl(reply+20)));
2917 +//}
2918
2919 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0))
2920 // calculate resid for sg
2921 - cmd->resid = cmd->request_bufflen - readl(reply+5);
2922 + cmd->resid = cmd->request_bufflen - le32_to_cpu(readl(reply+5));
2923 +#endif
2924
2925 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2926
2927 @@ -2205,7 +2821,7 @@
2928 case I2O_SCSI_DSC_SUCCESS:
2929 cmd->result = (DID_OK << 16);
2930 // handle underflow
2931 - if(readl(reply+5) < cmd->underflow ) {
2932 + if(le32_to_cpu(readl(reply+5)) < cmd->underflow ) {
2933 cmd->result = (DID_ERROR <<16);
2934 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2935 }
2936 @@ -2220,7 +2836,9 @@
2937 case I2O_SCSI_DSC_NO_ADAPTER:
2938 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2939 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2940 - pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2941 + pHba->name,
2942 + (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2943 + hba_status, dev_status, cmd->cmnd[0]);
2944 cmd->result = (DID_TIME_OUT << 16);
2945 break;
2946 case I2O_SCSI_DSC_ADAPTER_BUSY:
2947 @@ -2260,7 +2878,8 @@
2948 case I2O_SCSI_DSC_REQUEST_INVALID:
2949 default:
2950 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2951 - pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2952 + pHba->name, detailed_status & I2O_SCSI_DSC_MASK,
2953 + (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2954 hba_status, dev_status, cmd->cmnd[0]);
2955 cmd->result = (DID_ERROR << 16);
2956 break;
2957 @@ -2272,13 +2891,14 @@
2958 u32 len = sizeof(cmd->sense_buffer);
2959 len = (len > 40) ? 40 : len;
2960 // Copy over the sense data
2961 - memcpy(cmd->sense_buffer, (void*)(reply+28) , len);
2962 + memcpy_fromio(cmd->sense_buffer, (void*)(reply+28) , len);
2963 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2964 cmd->sense_buffer[2] == DATA_PROTECT ){
2965 /* This is to handle an array failed */
2966 cmd->result = (DID_TIME_OUT << 16);
2967 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2968 - pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2969 + pHba->name,
2970 + (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2971 hba_status, dev_status, cmd->cmnd[0]);
2972
2973 }
2974 @@ -2290,7 +2910,8 @@
2975 */
2976 cmd->result = (DID_TIME_OUT << 16);
2977 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2978 - pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2979 + pHba->name,
2980 + (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2981 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2982 }
2983
2984 @@ -2308,13 +2929,25 @@
2985 s32 rcode;
2986 ulong flags;
2987
2988 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
2989 spin_lock_irqsave(pHba->host->host_lock, flags);
2990 - if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2991 +#else
2992 + spin_lock_irqsave(&io_request_lock, flags);
2993 +#endif
2994 + if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
2995 goto out;
2996 - if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2997 + }
2998 +
2999 + if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
3000 goto out;
3001 + }
3002 rcode = 0;
3003 -out: spin_unlock_irqrestore(pHba->host->host_lock, flags);
3004 +out:
3005 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
3006 + spin_unlock_irqrestore(pHba->host->host_lock, flags);
3007 +#else
3008 + spin_unlock_irqrestore(&io_request_lock, flags);
3009 +#endif
3010 return rcode;
3011 }
3012
3013 @@ -2325,7 +2958,7 @@
3014 int max;
3015 int tid;
3016 struct i2o_device *d;
3017 - i2o_lct *lct = pHba->lct;
3018 + i2o_lct *lct = pHba->lct_va;
3019 u8 bus_no = 0;
3020 s16 scsi_id;
3021 s16 scsi_lun;
3022 @@ -2354,14 +2987,14 @@
3023 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
3024
3025 for(i=0;i<max;i++) {
3026 - if( lct->lct_entry[i].user_tid != 0xfff){
3027 + if( lct->lct_entry[i].user_tid != cpu_to_le32(0xfff)){
3028 continue;
3029 }
3030
3031 - if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
3032 - lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
3033 - lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
3034 - tid = lct->lct_entry[i].tid;
3035 + if( lct->lct_entry[i].class_id == cpu_to_le32(I2O_CLASS_RANDOM_BLOCK_STORAGE) ||
3036 + lct->lct_entry[i].class_id == cpu_to_le32(I2O_CLASS_SCSI_PERIPHERAL) ||
3037 + lct->lct_entry[i].class_id == cpu_to_le32(I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) ){
3038 + tid = le32_to_cpu(lct->lct_entry[i].tid);
3039 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
3040 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
3041 continue;
3042 @@ -2415,7 +3048,7 @@
3043 }
3044 }
3045 memset(pDev,0,sizeof(struct adpt_device));
3046 - pDev->tid = d->lct_data.tid;
3047 + pDev->tid = le32_to_cpu(d->lct_data.tid);
3048 pDev->scsi_channel = bus_no;
3049 pDev->scsi_id = scsi_id;
3050 pDev->scsi_lun = scsi_lun;
3051 @@ -2436,18 +3069,30 @@
3052 // We found an old device - check it
3053 while(pDev) {
3054 if(pDev->scsi_lun == scsi_lun) {
3055 - if(pDev->pScsi_dev->online == FALSE) {
3056 + /*
3057 	+ * Cannot mark a device as
3058 	+ * changed while in the error handler,
3059 	+ * as it can trigger a recursive
3060 	+ * stack overflow panic.
3061 + */
3062 + int change_ok = pDev->pScsi_dev
3063 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,1))
3064 + && !test_bit(SHOST_RECOVERY, &pHba->host->shost_state);
3065 +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,0))
3066 + && !test_bit(SHOST_RECOVERY, &pHba->host->shost_state);
3067 +#else
3068 + && !pHba->host->in_recovery;
3069 +#endif
3070 + if(pDev->pScsi_dev && (pDev->pScsi_dev->online == FALSE)) {
3071 printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
3072 pHba->name,bus_no,scsi_id,scsi_lun);
3073 - if (pDev->pScsi_dev) {
3074 - pDev->pScsi_dev->online = TRUE;
3075 - }
3076 + pDev->pScsi_dev->online = TRUE;
3077 }
3078 d = pDev->pI2o_dev;
3079 - if(d->lct_data.tid != tid) { // something changed
3080 + if(le32_to_cpu(d->lct_data.tid) != tid) { // something changed
3081 pDev->tid = tid;
3082 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
3083 - if (pDev->pScsi_dev) {
3084 + if (change_ok) {
3085 pDev->pScsi_dev->changed = TRUE;
3086 pDev->pScsi_dev->removable = TRUE;
3087 }
3088 @@ -2472,6 +3117,20 @@
3089 printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
3090 if (pDev->pScsi_dev) {
3091 pDev->pScsi_dev->online = FALSE;
3092 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
3093 + if (atomic_read(&pDev->pScsi_dev->access_count)) {
3094 +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
3095 + if (pDev->pScsi_dev->device_busy) {
3096 +#else
3097 + if (pDev->pScsi_dev->access_count) {
3098 +#endif
3099 + // A drive that was mounted is no longer there... bad!
3100 +#ifdef SCSI_LOG_ERROR_RECOVERY
3101 + SCSI_LOG_ERROR_RECOVERY(1, printk ("%s:Rescan: Previously "
3102 + "mounted drive not found!\n",pHba->name));
3103 +#endif
3104 + printk(KERN_WARNING"%s:Mounted drive taken offline\n",pHba->name);
3105 + }
3106 }
3107 }
3108 }
3109 @@ -2481,20 +3140,38 @@
3110 static void adpt_fail_posted_scbs(adpt_hba* pHba)
3111 {
3112 Scsi_Cmnd* cmd = NULL;
3113 - Scsi_Device* d = NULL;
3114 + Scsi_Device* d;
3115
3116 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
3117 +# if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
3118 shost_for_each_device(d, pHba->host) {
3119 +# else
3120 + list_for_each_entry(d, &pHba->host->my_devices, siblings) {
3121 +# endif
3122 unsigned long flags;
3123 spin_lock_irqsave(&d->list_lock, flags);
3124 list_for_each_entry(cmd, &d->cmd_list, list) {
3125 + if (cmd->serial_number == 0) {
3126 + continue;
3127 + }
3128 + cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
3129 + cmd->scsi_done(cmd);
3130 + }
3131 + spin_unlock_irqrestore(&d->list_lock, flags);
3132 + }
3133 +#else
3134 + d = pHba->host->host_queue;
3135 + while( d != NULL ){
3136 + for(cmd = d->device_queue; cmd ; cmd = cmd->next){
3137 if(cmd->serial_number == 0){
3138 continue;
3139 }
3140 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
3141 cmd->scsi_done(cmd);
3142 }
3143 - spin_unlock_irqrestore(&d->list_lock, flags);
3144 + d = d->next;
3145 }
3146 +#endif
3147 }
3148
3149
3150 @@ -2524,17 +3201,17 @@
3151 }
3152 }
3153
3154 - if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
3155 + if(pHba->status_block_va->iop_state == cpu_to_le32(ADAPTER_STATE_FAULTED)) {
3156 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
3157 return -1;
3158 }
3159
3160 - if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
3161 - pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
3162 - pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
3163 - pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
3164 + if (pHba->status_block_va->iop_state == cpu_to_le32(ADAPTER_STATE_READY) ||
3165 + pHba->status_block_va->iop_state == cpu_to_le32(ADAPTER_STATE_OPERATIONAL) ||
3166 + pHba->status_block_va->iop_state == cpu_to_le32(ADAPTER_STATE_HOLD) ||
3167 + pHba->status_block_va->iop_state == cpu_to_le32(ADAPTER_STATE_FAILED)) {
3168 adpt_i2o_reset_hba(pHba);
3169 - if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
3170 + if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block_va->iop_state != cpu_to_le32(ADAPTER_STATE_RESET)) {
3171 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
3172 return -1;
3173 }
3174 @@ -2586,10 +3263,10 @@
3175 u32 *msg;
3176 ulong timeout = jiffies + 5*HZ;
3177
3178 - while(m == EMPTY_QUEUE){
3179 + while(m == cpu_to_le32(EMPTY_QUEUE)){
3180 rmb();
3181 m = readl(pHba->post_port);
3182 - if(m != EMPTY_QUEUE){
3183 + if(m != cpu_to_le32(EMPTY_QUEUE)){
3184 break;
3185 }
3186 if(time_after(jiffies,timeout)){
3187 @@ -2597,9 +3274,9 @@
3188 return 2;
3189 }
3190 }
3191 - msg = (u32*)(pHba->msg_addr_virt + m);
3192 - writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
3193 - writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
3194 + msg = (u32*)(pHba->msg_addr_virt + le32_to_cpu(m));
3195 + writel( cpu_to_le32(THREE_WORD_MSG_SIZE | SGL_OFFSET_0),&msg[0]);
3196 + writel( cpu_to_le32(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0),&msg[1]);
3197 writel( 0,&msg[2]);
3198 wmb();
3199
3200 @@ -2611,17 +3288,16 @@
3201 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
3202 {
3203 u8 *status;
3204 + dma_addr_t addr;
3205 u32 *msg = NULL;
3206 int i;
3207 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
3208 - u32* ptr;
3209 - u32 outbound_frame; // This had to be a 32 bit address
3210 u32 m;
3211
3212 do {
3213 rmb();
3214 m = readl(pHba->post_port);
3215 - if (m != EMPTY_QUEUE) {
3216 + if (m != cpu_to_le32(EMPTY_QUEUE)) {
3217 break;
3218 }
3219
3220 @@ -2629,27 +3305,34 @@
3221 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
3222 return -ETIMEDOUT;
3223 }
3224 - } while(m == EMPTY_QUEUE);
3225 + } while(m == cpu_to_le32(EMPTY_QUEUE));
3226
3227 - msg=(u32 *)(pHba->msg_addr_virt+m);
3228 + msg=(u32 *)(pHba->msg_addr_virt+le32_to_cpu(m));
3229
3230 - status = kmalloc(4,GFP_KERNEL|ADDR32);
3231 + status = (u8*)pci_alloc_consistent(pHba->pDev, 4, &addr);
3232 if (status==NULL) {
3233 adpt_send_nop(pHba, m);
3234 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
3235 pHba->name);
3236 return -ENOMEM;
3237 }
3238 + if( (sizeof(dma_addr_t) > 4) && (pHba->pae_support) && (((u64)addr >> 32) != 0) ) {
3239 + pci_free_consistent(pHba->pDev, 4, status, addr);
3240 + adpt_send_nop(pHba, m);
3241 + printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
3242 + pHba->name);
3243 + return -ENOMEM;
3244 + }
3245 memset(status, 0, 4);
3246
3247 - writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
3248 - writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
3249 + writel( cpu_to_le32(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6), &msg[0]);
3250 + writel( cpu_to_le32(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID), &msg[1]);
3251 writel(0, &msg[2]);
3252 writel(0x0106, &msg[3]); /* Transaction context */
3253 - writel(4096, &msg[4]); /* Host page frame size */
3254 - writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
3255 - writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
3256 - writel(virt_to_bus(status), &msg[7]);
3257 + writel( cpu_to_le32(4096), &msg[4]); /* Host page frame size */
3258 + writel( cpu_to_le32((REPLY_FRAME_SIZE)<<16|0x80), &msg[5]); /* Outbound msg frame size and Initcode */
3259 + writel( cpu_to_le32(0xD0000004), &msg[6]); /* Simple SG LE, EOB */
3260 + writel( cpu_to_le32(addr), &msg[7]);
3261
3262 writel(m, pHba->post_port);
3263 wmb();
3264 @@ -2664,36 +3347,42 @@
3265 rmb();
3266 if(time_after(jiffies,timeout)){
3267 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
3268 - kfree((void*)status);
3269 + /* We lose 4 bytes of "status" here, but we
3270 + cannot free these because the controller may
3271 + wake up and corrupt those bytes at any time */
3272 + /* pci_free_consistent(pHba->pDev, 4, status, addr); */
3273 return -ETIMEDOUT;
3274 }
3275 } while (1);
3276
3277 // If the command was successful, fill the fifo with our reply
3278 // message packets
3279 - if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
3280 - kfree((void*)status);
3281 + if(*status != le32_to_cpu(0x04) /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
3282 + pci_free_consistent(pHba->pDev, 4, status, addr);
3283 return -2;
3284 }
3285 - kfree((void*)status);
3286 + pci_free_consistent(pHba->pDev, 4, status, addr);
3287
3288 - if(pHba->reply_pool != NULL){
3289 - kfree(pHba->reply_pool);
3290 + if(pHba->reply_pool_va != NULL){
3291 + pci_free_consistent(pHba->pDev, pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, pHba->reply_pool_va, pHba->reply_pool_pa);
3292 }
3293
3294 - pHba->reply_pool = (u32*)kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
3295 - if(!pHba->reply_pool){
3296 + pHba->reply_pool_va = (u32*)pci_alloc_consistent(pHba->pDev, pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, &pHba->reply_pool_pa);
3297 + if(!pHba->reply_pool_va){
3298 printk(KERN_ERR"%s: Could not allocate reply pool\n",pHba->name);
3299 return -1;
3300 }
3301 - memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
3302 + if( (sizeof(dma_addr_t) > 4) && (pHba->pae_support) && (((u64)pHba->reply_pool_pa >> 32) != 0) ) {
3303 + pci_free_consistent(pHba->pDev, pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, pHba->reply_pool_va, pHba->reply_pool_pa);
3304 + pHba->reply_pool_va = NULL;
3305 + printk(KERN_ERR"%s: Could not allocate reply pool in 32 bit space\n",pHba->name);
3306 + return -1;
3307 + }
3308 + memset(pHba->reply_pool_va, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
3309
3310 - ptr = pHba->reply_pool;
3311 for(i = 0; i < pHba->reply_fifo_size; i++) {
3312 - outbound_frame = (u32)virt_to_bus(ptr);
3313 - writel(outbound_frame, pHba->reply_port);
3314 + writel(cpu_to_le32(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4)), pHba->reply_port);
3315 wmb();
3316 - ptr += REPLY_FRAME_SIZE;
3317 }
3318 adpt_i2o_status_get(pHba);
3319 return 0;
3320 @@ -2717,26 +3406,26 @@
3321 u32 m;
3322 u32 *msg;
3323 u8 *status_block=NULL;
3324 - ulong status_block_bus;
3325 + u64 status_block_pa;
3326
3327 - if(pHba->status_block == NULL) {
3328 - pHba->status_block = (i2o_status_block*)
3329 - kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32);
3330 - if(pHba->status_block == NULL) {
3331 + if(pHba->status_block_va == NULL) {
3332 + pHba->status_block_va = (i2o_status_block*)
3333 + pci_alloc_consistent(pHba->pDev, sizeof(i2o_status_block),
3334 + &pHba->status_block_pa);
3335 + if(pHba->status_block_va == NULL) {
3336 printk(KERN_ERR
3337 "dpti%d: Get Status Block failed; Out of memory. \n",
3338 pHba->unit);
3339 return -ENOMEM;
3340 }
3341 }
3342 - memset(pHba->status_block, 0, sizeof(i2o_status_block));
3343 - status_block = (u8*)(pHba->status_block);
3344 - status_block_bus = virt_to_bus(pHba->status_block);
3345 + memset(pHba->status_block_va, 0, sizeof(i2o_status_block));
3346 + status_block = (u8*)(pHba->status_block_va);
3347 timeout = jiffies+TMOUT_GETSTATUS*HZ;
3348 do {
3349 rmb();
3350 m = readl(pHba->post_port);
3351 - if (m != EMPTY_QUEUE) {
3352 + if (m != cpu_to_le32(EMPTY_QUEUE)) {
3353 break;
3354 }
3355 if(time_after(jiffies,timeout)){
3356 @@ -2744,20 +3433,21 @@
3357 pHba->name);
3358 return -ETIMEDOUT;
3359 }
3360 - } while(m==EMPTY_QUEUE);
3361 + } while(m==cpu_to_le32(EMPTY_QUEUE));
3362
3363
3364 - msg=(u32*)(pHba->msg_addr_virt+m);
3365 + msg=(u32*)(pHba->msg_addr_virt+le32_to_cpu(m));
3366
3367 - writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
3368 - writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
3369 - writel(1, &msg[2]);
3370 + writel( cpu_to_le32(NINE_WORD_MSG_SIZE|SGL_OFFSET_0), &msg[0]);
3371 + writel( cpu_to_le32(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID), &msg[1]);
3372 + writel( cpu_to_le32(1), &msg[2]);
3373 writel(0, &msg[3]);
3374 writel(0, &msg[4]);
3375 writel(0, &msg[5]);
3376 - writel(((u32)status_block_bus)&0xffffffff, &msg[6]);
3377 - writel(0, &msg[7]);
3378 - writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
3379 + status_block_pa = cpu_to_le64(pHba->status_block_pa);
3380 + writel( (u32)status_block_pa, &msg[6]);
3381 + writel( (u32)(status_block_pa >> 32), &msg[7]);
3382 + writel( cpu_to_le32(sizeof(i2o_status_block)), &msg[8]); // 88 bytes
3383
3384 //post message
3385 writel(m, pHba->post_port);
3386 @@ -2773,18 +3463,25 @@
3387 }
3388
3389 // Set up our number of outbound and inbound messages
3390 - pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
3391 + pHba->post_fifo_size = le32_to_cpu(pHba->status_block_va->max_inbound_frames);
3392 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
3393 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
3394 }
3395
3396 - pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
3397 + pHba->reply_fifo_size = le32_to_cpu(pHba->status_block_va->max_outbound_frames);
3398 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
3399 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
3400 }
3401
3402 // Calculate the Scatter Gather list size
3403 - pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element);
3404 + pHba->sg_tablesize = (le32_to_cpu(pHba->status_block_va->inbound_frame_size) * 4 - 12 * sizeof(u32))/ sizeof(struct sg_simple_element);
3405 + if( (sizeof(dma_addr_t) > 4)
3406 + && (num_physpages > (0xFFFFFFFFULL >> PAGE_SHIFT))) {
3407 + pHba->sg_tablesize
3408 + = (le32_to_cpu(pHba->status_block_va->inbound_frame_size) * 4
3409 + - 14 * sizeof(u32))
3410 + / (sizeof(struct sg_simple_element) + sizeof(u32));
3411 + }
3412 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3413 pHba->sg_tablesize = SG_LIST_ELEMENTS;
3414 }
3415 @@ -2792,7 +3489,7 @@
3416
3417 #ifdef DEBUG
3418 printk("dpti%d: State = ",pHba->unit);
3419 - switch(pHba->status_block->iop_state) {
3420 + switch(le32_to_cpu(pHba->status_block_va->iop_state)) {
3421 case 0x01:
3422 printk("INIT\n");
3423 break;
3424 @@ -2815,7 +3512,7 @@
3425 printk("FAULTED\n");
3426 break;
3427 default:
3428 - printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3429 + printk("%x (unknown!!)\n",le32_to_cpu(pHba->status_block_va->iop_state));
3430 }
3431 #endif
3432 return 0;
3433 @@ -2830,28 +3527,35 @@
3434 int ret;
3435 u32 buf[16];
3436
3437 - if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3438 - pHba->lct_size = pHba->status_block->expected_lct_size;
3439 + if ((pHba->lct_size == 0) || (pHba->lct_va == NULL)){
3440 + pHba->lct_size = le32_to_cpu(pHba->status_block_va->expected_lct_size);
3441 }
3442 do {
3443 - if (pHba->lct == NULL) {
3444 - pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32);
3445 - if(pHba->lct == NULL) {
3446 + if (pHba->lct_va == NULL) {
3447 + pHba->lct_va = pci_alloc_consistent(pHba->pDev, pHba->lct_size, &pHba->lct_pa);
3448 + if(pHba->lct_va == NULL) {
3449 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3450 pHba->name);
3451 return -ENOMEM;
3452 }
3453 + if( (sizeof(dma_addr_t) > 4) && (pHba->pae_support) && (((u64)pHba->lct_pa >> 32) != 0) ) {
3454 + pci_free_consistent(pHba->pDev, pHba->lct_size, pHba->lct_va, pHba->lct_pa);
3455 + pHba->lct_va = NULL;
3456 + printk(KERN_CRIT "%s: Lct Get failed. Out of 32 bit memory.\n",
3457 + pHba->name);
3458 + return -ENOMEM;
3459 + }
3460 }
3461 - memset(pHba->lct, 0, pHba->lct_size);
3462 + memset(pHba->lct_va, 0, pHba->lct_size);
3463
3464 - msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3465 - msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3466 + msg[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6);
3467 + msg[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID);
3468 msg[2] = 0;
3469 msg[3] = 0;
3470 msg[4] = 0xFFFFFFFF; /* All devices */
3471 msg[5] = 0x00000000; /* Report now */
3472 - msg[6] = 0xD0000000|pHba->lct_size;
3473 - msg[7] = virt_to_bus(pHba->lct);
3474 + msg[6] = cpu_to_le32(0xD0000000|pHba->lct_size);
3475 + msg[7] = cpu_to_le32(pHba->lct_pa);
3476
3477 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3478 printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
3479 @@ -2860,26 +3564,29 @@
3480 return ret;
3481 }
3482
3483 - if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3484 - pHba->lct_size = pHba->lct->table_size << 2;
3485 - kfree(pHba->lct);
3486 - pHba->lct = NULL;
3487 + ret = le32_to_cpu(pHba->lct_va->table_size) << 2;
3488 + if (ret > pHba->lct_size) {
3489 + pci_free_consistent(pHba->pDev, pHba->lct_size, pHba->lct_va, pHba->lct_pa);
3490 + pHba->lct_size = ret;
3491 + pHba->lct_va = NULL;
3492 }
3493 - } while (pHba->lct == NULL);
3494 + } while (pHba->lct_va == NULL);
3495
3496 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3497
3498
3499 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3500 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3501 - pHba->FwDebugBufferSize = buf[1];
3502 - pHba->FwDebugBuffer_P = pHba->base_addr_virt + buf[0];
3503 - pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET;
3504 - pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET;
3505 - pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3506 - pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET;
3507 - pHba->FwDebugBuffer_P += buf[2];
3508 - pHba->FwDebugFlags = 0;
3509 + pHba->fw_buffer_addr_virt = pHba->base_addr_virt + buf[0];
3510 + pHba->bled_value_addr_virt = pHba->fw_buffer_addr_virt + FW_DEBUG_BLED_OFFSET;
3511 + pHba->bled_flag_addr_virt = pHba->bled_value_addr_virt + 1;
3512 + /* bled fields are both 8 bits, so increment by one is what is desired */
3513 +#ifdef DEBUG
3514 + pHba->fw_buffer_size = buf[1];
3515 + pHba->fw_debug_flags_addr_virt = pHba->fw_buffer_addr_virt + FW_DEBUG_FLAGS_OFFSET;
3516 + pHba->fw_string_len_addr_virt = pHba->fw_buffer_addr_virt + FW_DEBUG_STR_LENGTH_OFFSET;
3517 +#endif
3518 + pHba->fw_buffer_addr_virt += buf[2];
3519 }
3520
3521 return 0;
3522 @@ -2887,51 +3594,58 @@
3523
3524 static int adpt_i2o_build_sys_table(void)
3525 {
3526 - adpt_hba* pHba = NULL;
3527 + adpt_hba* pHba = hba_chain;
3528 int count = 0;
3529
3530 + if(sys_tbl_va)
3531 + pci_free_consistent(pHba->pDev, sys_tbl_len, sys_tbl_va, sys_tbl_pa);
3532 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
3533 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
3534
3535 - if(sys_tbl)
3536 - kfree(sys_tbl);
3537 -
3538 - sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
3539 - if(!sys_tbl) {
3540 + sys_tbl_va = pci_alloc_consistent(pHba->pDev, sys_tbl_len, &sys_tbl_pa);
3541 + if(!sys_tbl_va) {
3542 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3543 return -ENOMEM;
3544 }
3545 - memset(sys_tbl, 0, sys_tbl_len);
3546 + if( (sizeof(dma_addr_t) > 4) && (pHba->pae_support) && (((u64)sys_tbl_pa >> 32) != 0) ) {
3547 + pci_free_consistent(pHba->pDev, sys_tbl_len, sys_tbl_va, sys_tbl_pa);
3548 + sys_tbl_va = NULL;
3549 + printk(KERN_WARNING "SysTab Set failed. Out of 32 bit memory.\n");
3550 + return -ENOMEM;
3551 + }
3552 + memset(sys_tbl_va, 0, sys_tbl_len);
3553
3554 - sys_tbl->num_entries = hba_count;
3555 - sys_tbl->version = I2OVERSION;
3556 - sys_tbl->change_ind = sys_tbl_ind++;
3557 + sys_tbl_va->num_entries = cpu_to_le32(hba_count);
3558 + sys_tbl_va->version = cpu_to_le32(I2OVERSION);
3559 + sys_tbl_va->change_ind = cpu_to_le32(sys_tbl_ind++);
3560
3561 for(pHba = hba_chain; pHba; pHba = pHba->next) {
3562 + u64 addr;
3563 // Get updated Status Block so we have the latest information
3564 if (adpt_i2o_status_get(pHba)) {
3565 - sys_tbl->num_entries--;
3566 + sys_tbl_va->num_entries = cpu_to_le32(le32_to_cpu(sys_tbl_va->num_entries) - 1);
3567 continue; // try next one
3568 }
3569
3570 - sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3571 - sys_tbl->iops[count].iop_id = pHba->unit + 2;
3572 - sys_tbl->iops[count].seg_num = 0;
3573 - sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3574 - sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3575 - sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3576 - sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3577 - sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3578 - sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3579 - sys_tbl->iops[count].inbound_low = (u32)virt_to_bus((void*)pHba->post_port);
3580 - sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus((void*)pHba->post_port)>>32);
3581 + sys_tbl_va->iops[count].org_id = pHba->status_block_va->org_id;
3582 + sys_tbl_va->iops[count].iop_id = cpu_to_le32(pHba->unit + 2);
3583 + sys_tbl_va->iops[count].seg_num = 0;
3584 + sys_tbl_va->iops[count].i2o_version = pHba->status_block_va->i2o_version;
3585 + sys_tbl_va->iops[count].iop_state = pHba->status_block_va->iop_state;
3586 + sys_tbl_va->iops[count].msg_type = pHba->status_block_va->msg_type;
3587 + sys_tbl_va->iops[count].frame_size = pHba->status_block_va->inbound_frame_size;
3588 + sys_tbl_va->iops[count].last_changed = cpu_to_le32(sys_tbl_ind - 1); // ??
3589 + sys_tbl_va->iops[count].iop_capabilities = pHba->status_block_va->iop_capabilities;
3590 + addr = cpu_to_le64(pHba->base_addr_phys + 0x40);
3591 + sys_tbl_va->iops[count].inbound_low = (u32)addr;
3592 + sys_tbl_va->iops[count].inbound_high = (u32)(addr >> 32);
3593
3594 count++;
3595 }
3596
3597 #ifdef DEBUG
3598 {
3599 - u32 *table = (u32*)sys_tbl;
3600 + u32 *table = (u32*)sys_tbl_va;
3601 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3602 for(count = 0; count < (sys_tbl_len >>2); count++) {
3603 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3604 @@ -2951,7 +3665,7 @@
3605 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3606 {
3607 char buf[64];
3608 - int unit = d->lct_data.tid;
3609 + int unit = le32_to_cpu(d->lct_data.tid);
3610
3611 printk(KERN_INFO "TID %3.3d ", unit);
3612
3613 @@ -2971,17 +3685,17 @@
3614 printk(" Rev: %-12.12s\n", buf);
3615 }
3616 #ifdef DEBUG
3617 - printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3618 - printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3619 + printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(le32_to_cpu(d->lct_data.class_id)));
3620 + printk(KERN_INFO "\tSubclass: 0x%04X\n", le32_to_cpu(d->lct_data.sub_class));
3621 printk(KERN_INFO "\tFlags: ");
3622
3623 - if(d->lct_data.device_flags&(1<<0))
3624 + if(le32_to_cpu(d->lct_data.device_flags)&(1<<0))
3625 printk("C"); // ConfigDialog requested
3626 - if(d->lct_data.device_flags&(1<<1))
3627 + if(le32_to_cpu(d->lct_data.device_flags)&(1<<1))
3628 printk("U"); // Multi-user capable
3629 - if(!(d->lct_data.device_flags&(1<<4)))
3630 + if(!(le32_to_cpu(d->lct_data.device_flags)&(1<<4)))
3631 printk("P"); // Peer service enabled!
3632 - if(!(d->lct_data.device_flags&(1<<5)))
3633 + if(!(le32_to_cpu(d->lct_data.device_flags)&(1<<5)))
3634 printk("M"); // Mgmt service enabled!
3635 printk("\n");
3636 #endif
3637 @@ -3056,35 +3770,42 @@
3638 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3639 {
3640 u32 msg[6];
3641 - int ret, size = sizeof(i2o_hrt);
3642 + int ret, size = sizeof(i2o_hrt), new_size;
3643
3644 do {
3645 - if (pHba->hrt == NULL) {
3646 - pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32);
3647 - if (pHba->hrt == NULL) {
3648 + if (pHba->hrt_va == NULL) {
3649 + pHba->hrt_va = pci_alloc_consistent(pHba->pDev, size, &pHba->hrt_pa);
3650 + if (pHba->hrt_va == NULL) {
3651 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3652 return -ENOMEM;
3653 }
3654 + if( (sizeof(dma_addr_t) > 4) && (pHba->pae_support) && (((u64)pHba->hrt_pa >> 32) != 0) ) {
3655 + pci_free_consistent(pHba->pDev, size, pHba->hrt_va, pHba->hrt_pa);
3656 + pHba->hrt_va = NULL;
3657 + printk(KERN_CRIT "%s: Hrt Get failed; Out of 32 bit memory.\n", pHba->name);
3658 + return -ENOMEM;
3659 + }
3660 }
3661
3662 - msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3663 - msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3664 + msg[0]= cpu_to_le32(SIX_WORD_MSG_SIZE| SGL_OFFSET_4);
3665 + msg[1]= cpu_to_le32(I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID);
3666 msg[2]= 0;
3667 msg[3]= 0;
3668 - msg[4]= (0xD0000000 | size); /* Simple transaction */
3669 - msg[5]= virt_to_bus(pHba->hrt); /* Dump it here */
3670 + msg[4]= cpu_to_le32(0xD0000000 | size); /* Simple transaction */
3671 + msg[5]= cpu_to_le32(pHba->hrt_pa); /* Dump it here */
3672
3673 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3674 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3675 return ret;
3676 }
3677
3678 - if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3679 - size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3680 - kfree(pHba->hrt);
3681 - pHba->hrt = NULL;
3682 + new_size = le32_to_cpu(pHba->hrt_va->num_entries) * le32_to_cpu(pHba->hrt_va->entry_len) << 2;
3683 + if (new_size > size) {
3684 + pci_free_consistent(pHba->pDev, size, pHba->hrt_va, pHba->hrt_pa);
3685 + size = new_size;
3686 + pHba->hrt_va = NULL;
3687 }
3688 - } while(pHba->hrt == NULL);
3689 + } while(pHba->hrt_va == NULL);
3690 return 0;
3691 }
3692
3693 @@ -3094,18 +3815,47 @@
3694 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3695 int group, int field, void *buf, int buflen)
3696 {
3697 - u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3698 - u8 resblk[8+buflen]; /* 8 bytes for header */
3699 + u16 opblk[] = { cpu_to_le16(1), 0, cpu_to_le16(I2O_PARAMS_FIELD_GET), cpu_to_le16(group), cpu_to_le16(1), cpu_to_le16(field) };
3700 + u8 *opblk_va;
3701 + dma_addr_t opblk_pa;
3702 + u8 *resblk_va;
3703 + dma_addr_t resblk_pa;
3704 int size;
3705
3706 + /* 8 bytes for header */
3707 + resblk_va = pci_alloc_consistent(pHba->pDev, sizeof(u8) * (8 + buflen), &resblk_pa);
3708 + if (resblk_va == NULL) {
3709 + printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3710 + return -ENOMEM;
3711 + }
3712 +
3713 + opblk_va = pci_alloc_consistent(pHba->pDev, sizeof(opblk), &opblk_pa);
3714 + if (opblk_va == NULL) {
3715 + pci_free_consistent(pHba->pDev, sizeof(u8) * (8+buflen), resblk_va, resblk_pa);
3716 + printk(KERN_CRIT "%s: query operation failed; Out of memory.\n", pHba->name);
3717 + return -ENOMEM;
3718 + }
3719 +
3720 if (field == -1) /* whole group */
3721 - opblk[4] = -1;
3722 + opblk[4] = -1;
3723 + memcpy(opblk_va, opblk, sizeof(opblk));
3724
3725 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3726 - opblk, sizeof(opblk), resblk, sizeof(resblk));
3727 + opblk_va, opblk_pa, sizeof(opblk), resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3728 + pci_free_consistent(pHba->pDev, sizeof(opblk), opblk_va, opblk_pa);
3729 + if (size == -ETIME) {
3730 + pci_free_consistent(pHba->pDev, sizeof(u8) * (8+buflen), resblk_va, resblk_pa);
3731 + printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3732 + return -ETIME;
3733 + } else if (size == -EINTR) {
3734 + pci_free_consistent(pHba->pDev, sizeof(u8) * (8+buflen), resblk_va, resblk_pa);
3735 + printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3736 + return -EINTR;
3737 + }
3738
3739 - memcpy(buf, resblk+8, buflen); /* cut off header */
3740 + memcpy(buf, resblk_va+8, buflen); /* cut off header */
3741
3742 + pci_free_consistent(pHba->pDev, sizeof(u8) * (8+buflen), resblk_va, resblk_pa);
3743 if (size < 0)
3744 return size;
3745
3746 @@ -3122,37 +3872,38 @@
3747 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3748 */
3749 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3750 - void *opblk, int oplen, void *resblk, int reslen)
3751 + void *opblk_va, dma_addr_t opblk_pa, int oplen,
3752 + void *resblk_va, dma_addr_t resblk_pa, int reslen)
3753 {
3754 u32 msg[9];
3755 - u32 *res = (u32 *)resblk;
3756 + u32 *res = (u32 *)resblk_va;
3757 int wait_status;
3758
3759 - msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3760 - msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3761 + msg[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_5);
3762 + msg[1] = cpu_to_le32(cmd << 24 | HOST_TID << 12 | tid);
3763 msg[2] = 0;
3764 msg[3] = 0;
3765 msg[4] = 0;
3766 - msg[5] = 0x54000000 | oplen; /* OperationBlock */
3767 - msg[6] = virt_to_bus(opblk);
3768 - msg[7] = 0xD0000000 | reslen; /* ResultBlock */
3769 - msg[8] = virt_to_bus(resblk);
3770 + msg[5] = cpu_to_le32(0x54000000 | oplen); /* OperationBlock */
3771 + msg[6] = cpu_to_le32(opblk_pa);
3772 + msg[7] = cpu_to_le32(0xD0000000 | reslen); /* ResultBlock */
3773 + msg[8] = cpu_to_le32(resblk_pa);
3774
3775 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3776 return wait_status; /* -DetailedStatus */
3777 }
3778
3779 - if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3780 + if (res[1]&cpu_to_le32(0x00FF0000)) { /* BlockStatus != SUCCESS */
3781 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3782 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3783 pHba->name,
3784 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3785 : "PARAMS_GET",
3786 - res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3787 - return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3788 + le32_to_cpu(res[1])>>24, (le32_to_cpu(res[1])>>16)&0xFF, le32_to_cpu(res[1])&0xFFFF);
3789 + return -((le32_to_cpu(res[1]) >> 16) & 0xFF); /* -BlockStatus */
3790 }
3791
3792 - return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3793 + return 4 + ((le32_to_cpu(res[1]) & 0x0000FFFF) << 2); /* bytes used in resblk */
3794 }
3795
3796
3797 @@ -3165,13 +3916,13 @@
3798
3799 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3800
3801 - if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3802 - (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3803 + if((pHba->status_block_va->iop_state != cpu_to_le32(ADAPTER_STATE_READY)) &&
3804 + (pHba->status_block_va->iop_state != cpu_to_le32(ADAPTER_STATE_OPERATIONAL))){
3805 return 0;
3806 }
3807
3808 - msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3809 - msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3810 + msg[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0);
3811 + msg[1] = cpu_to_le32(I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID);
3812 msg[2] = 0;
3813 msg[3] = 0;
3814
3815 @@ -3196,18 +3947,18 @@
3816 int ret;
3817
3818 adpt_i2o_status_get(pHba);
3819 - if(!pHba->status_block){
3820 + if(!pHba->status_block_va){
3821 return -ENOMEM;
3822 }
3823 /* Enable only allowed on READY state */
3824 - if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3825 + if(pHba->status_block_va->iop_state == cpu_to_le32(ADAPTER_STATE_OPERATIONAL))
3826 return 0;
3827
3828 - if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3829 + if(pHba->status_block_va->iop_state != cpu_to_le32(ADAPTER_STATE_READY))
3830 return -EINVAL;
3831
3832 - msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3833 - msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3834 + msg[0]= cpu_to_le32(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0);
3835 + msg[1]= cpu_to_le32(I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID);
3836 msg[2]= 0;
3837 msg[3]= 0;
3838
3839 @@ -3228,11 +3979,11 @@
3840 u32 msg[12];
3841 int ret;
3842
3843 - msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3844 - msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3845 + msg[0] = cpu_to_le32(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6);
3846 + msg[1] = cpu_to_le32(I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID);
3847 msg[2] = 0;
3848 msg[3] = 0;
3849 - msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3850 + msg[4] = cpu_to_le32((0<<16) | ((pHba->unit+2) << 12)); /* Host 0 IOP ID (unit + 2) */
3851 msg[5] = 0; /* Segment 0 */
3852
3853 /*
3854 @@ -3240,11 +3991,11 @@
3855 * System table (SysTab), Private memory space declaration and
3856 * Private i/o space declaration
3857 */
3858 - msg[6] = 0x54000000 | sys_tbl_len;
3859 - msg[7] = virt_to_phys(sys_tbl);
3860 - msg[8] = 0x54000000 | 0;
3861 + msg[6] = cpu_to_le32(0x54000000 | sys_tbl_len);
3862 + msg[7] = cpu_to_le32(sys_tbl_pa);
3863 + msg[8] = cpu_to_le32(0x54000000 | 0);
3864 msg[9] = 0;
3865 - msg[10] = 0xD4000000 | 0;
3866 + msg[10] = cpu_to_le32(0xD4000000 | 0);
3867 msg[11] = 0;
3868
3869 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3870 @@ -3279,24 +4030,64 @@
3871
3872 #endif
3873
3874 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0))
3875 +static struct pci_dev *
3876 +adpt_pci_find_device(uint vendor, struct pci_dev *from)
3877 +{
3878 + if(!from){
3879 + from = pci_devices;
3880 + } else {
3881 + from = from->next;
3882 + }
3883 + while (from && from->vendor != vendor) {
3884 + from = from->next;
3885 + }
3886 + return from;
3887 +}
3888 +#endif
3889 +
3890 static Scsi_Host_Template driver_template = {
3891 - .name = "dpt_i2o",
3892 - .proc_name = "dpt_i2o",
3893 - .proc_info = adpt_proc_info,
3894 - .detect = adpt_detect,
3895 - .release = adpt_release,
3896 - .info = adpt_info,
3897 - .queuecommand = adpt_queue,
3898 - .eh_abort_handler = adpt_abort,
3899 - .eh_device_reset_handler = adpt_device_reset,
3900 - .eh_bus_reset_handler = adpt_bus_reset,
3901 - .eh_host_reset_handler = adpt_reset,
3902 - .bios_param = adpt_bios_param,
3903 - .slave_configure = adpt_slave_configure,
3904 - .can_queue = MAX_TO_IOP_MESSAGES,
3905 - .this_id = 7,
3906 - .cmd_per_lun = 1,
3907 - .use_clustering = ENABLE_CLUSTERING,
3908 + .name = "dpt_i2o",
3909 + .proc_name = "dpt_i2o",
3910 + .proc_info = adpt_proc_info,
3911 + .detect = adpt_detect,
3912 + .release = adpt_release,
3913 + .info = adpt_info,
3914 + .queuecommand = adpt_queue,
3915 + .eh_abort_handler = adpt_abort,
3916 + .eh_device_reset_handler = adpt_device_reset,
3917 + .eh_bus_reset_handler = adpt_bus_reset,
3918 + .eh_host_reset_handler = adpt_reset,
3919 + .bios_param = adpt_bios_param,
3920 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
3921 + .slave_configure = adpt_slave_configure,
3922 +#else
3923 + .select_queue_depths = adpt_select_queue_depths,
3924 +#endif
3925 + .can_queue = MAX_TO_IOP_MESSAGES,
3926 + .this_id = 7,
3927 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,65))
3928 + .sg_tablesize = 0, /* max scatter-gather cmds */
3929 +#endif
3930 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,7))
3931 + .max_sectors = 4096,
3932 +#endif
3933 + .cmd_per_lun = 256,
3934 + .use_clustering = ENABLE_CLUSTERING,
3935 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,65))
3936 + .use_new_eh_code = 1,
3937 +#endif
3938 + .proc_dir = &proc_scsi_dptI2O,
3939 };
3940 +
3941 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0))
3942 +#include "scsi_module.c"
3943 +#elif (defined(MODULE))
3944 #include "scsi_module.c"
3945 -MODULE_LICENSE("GPL");
3946 +#endif
3947 +
3948 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,65))
3949 +EXPORT_NO_SYMBOLS;
3950 +#endif
3951 +
3952 +MODULE_LICENSE("Dual BSD/GPL");
3953 diff -ruN linux-2.6.1/drivers/scsi/dpti.h linux-2.6.1-adaptec/drivers/scsi/dpti.h
3954 --- linux-2.6.1/drivers/scsi/dpti.h 2004-01-09 01:59:02.000000000 -0500
3955 +++ linux-2.6.1-adaptec/drivers/scsi/dpti.h 2004-01-27 14:09:03.967233544 -0500
3956 @@ -3,10 +3,10 @@
3957 -------------------
3958 begin : Thu Sep 7 2000
3959 copyright : (C) 2001 by Adaptec
3960 - email : deanna_bonds@adaptec.com
3961 + email : Mark_Salyzyn@adaptec.com
3962 + original author : deanna_bonds@adaptec.com
3963
3964 - See Documentation/scsi/dpti.txt for history, notes, license info
3965 - and credits
3966 + See README.dpti for history, notes, license info, and credits
3967 ***************************************************************************/
3968
3969 /***************************************************************************
3970 @@ -37,16 +37,28 @@
3971 * SCSI interface function Prototypes
3972 */
3973
3974 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
3975 +static int adpt_proc_info(struct Scsi_Host * host, char *buffer, char **start, off_t offset, int length, int inout);
3976 +#else
3977 +static int adpt_proc_info(char *buffer, char **start, off_t offset, int length, int host_no, int inout);
3978 +#endif
3979 static int adpt_detect(Scsi_Host_Template * sht);
3980 static int adpt_queue(Scsi_Cmnd * cmd, void (*cmdcomplete) (Scsi_Cmnd *));
3981 static int adpt_abort(Scsi_Cmnd * cmd);
3982 static int adpt_reset(Scsi_Cmnd* cmd);
3983 static int adpt_release(struct Scsi_Host *host);
3984 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
3985 static int adpt_slave_configure(Scsi_Device *);
3986 +#else
3987 +static void adpt_select_queue_depths(struct Scsi_Host *host, Scsi_Device * devicelist);
3988 +#endif
3989
3990 static const char *adpt_info(struct Scsi_Host *pSHost);
3991 -static int adpt_bios_param(struct scsi_device * sdev, struct block_device *dev,
3992 - sector_t, int geom[]);
3993 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,65))
3994 +static int adpt_bios_param(struct scsi_device * sdev, struct block_device * bdev, sector_t, int geom[]);
3995 +#else
3996 +static int adpt_bios_param(Disk * disk, kdev_t dev, int geom[]);
3997 +#endif
3998
3999 static int adpt_bus_reset(Scsi_Cmnd* cmd);
4000 static int adpt_device_reset(Scsi_Cmnd* cmd);
4001 @@ -62,17 +74,18 @@
4002
4003 #include "dpt/sys_info.h"
4004 #include <linux/wait.h>
4005 +#include <linux/interrupt.h>
4006 #include "dpt/dpti_i2o.h"
4007 #include "dpt/dpti_ioctl.h"
4008
4009 -#define DPT_I2O_VERSION "2.4 Build 5"
4010 +#define DPT_I2O_VERSION "2.5.0"
4011 #define DPT_VERSION 2
4012 -#define DPT_REVISION '4'
4013 -#define DPT_SUBREVISION '5'
4014 +#define DPT_REVISION '5'
4015 +#define DPT_SUBREVISION '0'
4016 #define DPT_BETA ""
4017 -#define DPT_MONTH 8
4018 -#define DPT_DAY 7
4019 -#define DPT_YEAR (2001-1980)
4020 +#define DPT_MONTH 8
4021 +#define DPT_DAY 4
4022 +#define DPT_YEAR (2003-1980)
4023
4024 #define DPT_DRIVER "dpt_i2o"
4025 #define DPTI_I2O_MAJOR (151)
4026 @@ -228,36 +241,42 @@
4027 char name[32];
4028 char detail[55];
4029
4030 - ulong base_addr_virt;
4031 - ulong msg_addr_virt;
4032 + char * base_addr_virt;
4033 + char * msg_addr_virt;
4034 ulong base_addr_phys;
4035 - ulong post_port;
4036 - ulong reply_port;
4037 - ulong irq_mask;
4038 + u32 * post_port;
4039 + u32 * reply_port;
4040 + u32 * irq_mask;
4041 u16 post_count;
4042 u32 post_fifo_size;
4043 u32 reply_fifo_size;
4044 - u32* reply_pool;
4045 + u32* reply_pool_va;
4046 + dma_addr_t reply_pool_pa;
4047 u32 sg_tablesize; // Scatter/Gather List Size.
4048 u8 top_scsi_channel;
4049 u8 top_scsi_id;
4050 u8 top_scsi_lun;
4051 + u8 pae_support;
4052
4053 - i2o_status_block* status_block;
4054 - i2o_hrt* hrt;
4055 - i2o_lct* lct;
4056 + i2o_status_block* status_block_va;
4057 + dma_addr_t status_block_pa;
4058 + i2o_hrt* hrt_va;
4059 + dma_addr_t hrt_pa;
4060 + i2o_lct* lct_va;
4061 + dma_addr_t lct_pa;
4062 uint lct_size;
4063 struct i2o_device* devices;
4064 struct adpt_channel channel[MAX_CHANNEL];
4065 struct proc_dir_entry* proc_entry; /* /proc dir */
4066
4067 - ulong FwDebugBuffer_P; // Virtual Address Of FW Debug Buffer
4068 - u32 FwDebugBufferSize; // FW Debug Buffer Size In Bytes
4069 - ulong FwDebugStrLength_P; // Virtual Addr Of FW Debug String Len
4070 - ulong FwDebugFlags_P; // Virtual Address Of FW Debug Flags
4071 - ulong FwDebugBLEDflag_P; // Virtual Addr Of FW Debug BLED
4072 - ulong FwDebugBLEDvalue_P; // Virtual Addr Of FW Debug BLED
4073 - u32 FwDebugFlags;
4074 + char * fw_buffer_addr_virt; // Virtual Address Of FW Debug Buffer
4075 + char * bled_flag_addr_virt; // Virtual Addr Of FW Debug BLED
4076 + char * bled_value_addr_virt; // Virtual Addr Of FW Debug BLED
4077 +
4078 + u32 fw_buffer_size; // FW Debug Buffer Size In Bytes
4079 + char * fw_string_len_addr_virt; // Virtual Addr Of FW Debug String Len
4080 + char * fw_debug_flags_addr_virt; // Virtual Address Of FW Debug Flags
4081 + u32 * ioctl_reply_context[4];
4082 } adpt_hba;
4083
4084 struct sg_simple_element {
4085 @@ -269,10 +288,28 @@
4086 * Function Prototypes
4087 */
4088
4089 +#ifndef IRQ_HANDLED
4090 + typedef void irqreturn_t;
4091 +# define IRQ_NONE
4092 +# define IRQ_HANDLED
4093 +#endif
4094 +
4095 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
4096 +# define iminor(x) MINOR(x->i_rdev)
4097 +#endif
4098 +
4099 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0))
4100 +# define set_current_state(x) current->state = x
4101 +#endif
4102 +
4103 +#ifndef MODULE_LICENSE
4104 +# define MODULE_LICENSE(x) /* NOTHING */
4105 +#endif
4106 +
4107 static void adpt_i2o_sys_shutdown(void);
4108 static int adpt_init(void);
4109 static int adpt_i2o_build_sys_table(void);
4110 -static void adpt_isr(int irq, void *dev_id, struct pt_regs *regs);
4111 +static irqreturn_t adpt_isr(int irq, void *dev_id, struct pt_regs *regs);
4112 #ifdef REBOOT_NOTIFIER
4113 static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p);
4114 #endif
4115 @@ -284,7 +321,8 @@
4116 static const char *adpt_i2o_get_class_name(int class);
4117 #endif
4118 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
4119 - void *opblk, int oplen, void *resblk, int reslen);
4120 + void *opblk_va, dma_addr_t opblk_pa, int oplen,
4121 + void *resblk_va, dma_addr_t resblk_pa, int reslen);
4122 static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout);
4123 static int adpt_i2o_lct_get(adpt_hba* pHba);
4124 static int adpt_i2o_parse_lct(adpt_hba* pHba);
4125 @@ -297,7 +335,7 @@
4126 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba);
4127 static s32 adpt_i2o_hrt_get(adpt_hba* pHba);
4128 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, Scsi_Cmnd* cmd, struct adpt_device* dptdevice);
4129 -static s32 adpt_i2o_to_scsi(ulong reply, Scsi_Cmnd* cmd);
4130 +static s32 adpt_i2o_to_scsi(u8 * reply, Scsi_Cmnd* cmd);
4131 static s32 adpt_scsi_register(adpt_hba* pHba,Scsi_Host_Template * sht);
4132 static s32 adpt_hba_reset(adpt_hba* pHba);
4133 static s32 adpt_i2o_reset_hba(adpt_hba* pHba);
4134 diff -ruN linux-2.6.1/drivers/scsi/dpti2oscsi2.c linux-2.6.1-adaptec/drivers/scsi/dpti2oscsi2.c
4135 --- linux-2.6.1/drivers/scsi/dpti2oscsi2.c 1969-12-31 19:00:00.000000000 -0500
4136 +++ linux-2.6.1-adaptec/drivers/scsi/dpti2oscsi2.c 2004-01-27 14:09:19.791827840 -0500
4137 @@ -0,0 +1,239 @@
4138 +
4139 +/*
4140 + * NOTE: You must include i2obscsi.h before including this file
4141 + */
4142 +
4143 +/*
4144 + * SCSI opcodes
4145 + */
4146 +/*
4147 + * i2oscsi2dixkxfer is a table of data transfer direction for SCSI Disk
4148 + * device commands. The table indicates if the command will transfer
4149 + * data in (from device to system) or data out (from system to device)
4150 + * NOSUPPORT indicates the command is not used for a disk device
4151 + */
4152 +
4153 +/* #define DATAIN I2O_SCB_FLAG_XFER_FROM_DEVICE */
4154 +#define DATAIN 0x01
4155 +/* #define DATAOUT I2O_SCB_FLAG_XFER_TO_DEVICE */
4156 +#define DATAOUT 0x02
4157 +/* #define NODATA I2O_SCB_FLAG_NO_DATA_XFER */
4158 +#define NODATA 0x00
4159 +#define NOSUPPORT 0xFF
4160 +
4161 +unsigned char i2oscsi2diskxfer[] = {
4162 +
4163 +
4164 +/* TEST_UNIT_READY 0x00 */ NODATA,
4165 +/* REZERO_UNIT 0x01 */ NODATA,
4166 +/* 0x02 */ NOSUPPORT,
4167 +/* REQUEST_SENSE 0x03 */ DATAIN,
4168 +/* FORMAT_UNIT 0x04 */ DATAOUT,
4169 +/* READ_BLOCK_LIMITS 0x05 */ DATAIN,
4170 +/* 0x06 */ NOSUPPORT,
4171 +/* REASSIGN_BLOCKS 0x07 */ DATAOUT,
4172 +/* READ_6 0x08 */ DATAIN,
4173 +
4174 +/* 0x09 */ NOSUPPORT,
4175 +
4176 +/* WRITE_6 0x0a */ DATAOUT,
4177 +/* SEEK_6 0x0b */ NODATA,
4178 +/* 0x0c */ NOSUPPORT,
4179 +/* 0x0d */ NOSUPPORT,
4180 +/* 0x0e */ NOSUPPORT,
4181 +/* READ_REVERSE 0x0f */ NOSUPPORT,
4182 +/* WRITE_FILEMARKS 0x10 */ NODATA,
4183 +/* SPACE 0x11 */ NOSUPPORT,
4184 +/* INQUIRY 0x12 */ DATAIN,
4185 +/* 0x13 */ NOSUPPORT,
4186 +/* RECOVER_BUFFERED_DATA 0x14 */ NOSUPPORT,
4187 +/* MODE_SELECT 0x15 */ DATAOUT,
4188 +/* RESERVE 0x16 */ DATAOUT,
4189 +/* RELEASE 0x17 */ NODATA,
4190 +/* COPY 0x18 */ DATAOUT,
4191 + /* ERASE 0x19 */ NOSUPPORT,
4192 + /* NODATA if supported */
4193 +/* MODE_SENSE 0x1a */ DATAIN,
4194 +/* START_STOP 0x1b */ NODATA,
4195 +/* RECEIVE_DIAGNOSTIC 0x1c */ DATAIN,
4196 +/* SEND_DIAGNOSTIC 0x1d */ DATAOUT,
4197 +/* ALLOW_MEDIUM_REMOVAL 0x1e */ NODATA,
4198 +
4199 +/* 0x1f */ NOSUPPORT,
4200 +/* 0x20 */ NOSUPPORT,
4201 +/* 0x21 */ NOSUPPORT,
4202 +/* 0x22 */ NOSUPPORT,
4203 +/* 0x23 */ NOSUPPORT,
4204 +
4205 +/* SET_WINDOW 0x24 */ DATAOUT,
4206 +/* READ_CAPACITY 0x25 */ DATAIN,
4207 +
4208 +/* 0x26 */ NOSUPPORT,
4209 +/* 0x27 */ NOSUPPORT,
4210 +
4211 +/* READ_10 0x28 */ DATAIN,
4212 +
4213 +/* 0x29 */ NOSUPPORT,
4214 +
4215 +/* WRITE_10 0x2a */ DATAOUT,
4216 +/* SEEK_10 0x2b */ NODATA,
4217 +
4218 +/* 0x2c */ NOSUPPORT,
4219 +/* 0x2d */ NOSUPPORT,
4220 +
4221 +/* WRITE_VERIFY 0x2e */ DATAOUT,
4222 +/* VERIFY 0x2f */ NODATA,
4223 +/* SEARCH_HIGH 0x30 */ DATAOUT,
4224 +/* SEARCH_EQUAL 0x31 */ DATAOUT,
4225 +/* SEARCH_LOW 0x32 */ DATAOUT,
4226 +/* SET_LIMITS 0x33 */ NODATA,
4227 +/* PRE_FETCH 0x34 */ DATAIN,
4228 +/* SYNCHRONIZE_CACHE 0x35 */ NODATA,
4229 +/* LOCK_UNLOCK_CACHE 0x36 */ NODATA,
4230 +/* READ_DEFECT_DATA 0x37 */ DATAIN,
4231 +/* MEDIUM_SCAN 0x38 */ DATAOUT,
4232 +/* COMPARE 0x39 */ DATAOUT,
4233 +/* COPY_VERIFY 0x3a */ DATAOUT,
4234 +/* WRITE_BUFFER 0x3b */ DATAOUT,
4235 +/* READ_BUFFER 0x3c */ DATAIN,
4236 +/* UPDATE_BLOCK 0x3d */ DATAOUT,
4237 +/* READ_LONG 0x3e */ DATAIN,
4238 +/* WRITE_LONG 0x3f */ DATAOUT,
4239 +/* CHANGE_DEFINITION 0x40 */ DATAOUT,
4240 +/* WRITE_SAME 0x41 */ DATAOUT,
4241 +
4242 +/* 0x42 */ DATAIN,
4243 +
4244 +/* READ_TOC 0x43 */ DATAIN,
4245 +
4246 +/* 0x44 */ DATAIN,
4247 +/* 0x45 */ NOSUPPORT,
4248 +/* 0x46 */ NOSUPPORT,
4249 +/* 0x47 */ NOSUPPORT,
4250 +/* 0x48 */ NOSUPPORT,
4251 +/* 0x49 */ NOSUPPORT,
4252 +/* 0x4a */ NOSUPPORT,
4253 +/* 0x4b */ NOSUPPORT,
4254 +
4255 +/* LOG_SELECT 0x4c */ DATAOUT,
4256 +/* LOG_SENSE 0x4d */ DATAIN,
4257 +
4258 +/* 0x4e */ NOSUPPORT,
4259 +/* 0x4f */ NOSUPPORT,
4260 +/* 0x50 */ NOSUPPORT,
4261 +/* 0x51 */ DATAIN,
4262 +/* 0x52 */ DATAIN,
4263 +/* 0x53 */ DATAOUT,
4264 +/* 0x54 */ NOSUPPORT,
4265 +
4266 +/* MODE_SELECT_10 0x55 */ DATAOUT,
4267 +
4268 +/* 0x56 */ NODATA,
4269 +/* 0x57 */ NODATA,
4270 +/* 0x58 */ NOSUPPORT,
4271 +/* 0x59 */ NOSUPPORT,
4272 +
4273 +/* MODE_SENSE_10 0x5a */ DATAIN,
4274 +
4275 +/* 0x5b */ DATAIN,
4276 +/* 0x5c */ DATAIN,
4277 +/* 0x5d */ NOSUPPORT,
4278 +/* 0x5e */ NOSUPPORT,
4279 +/* 0x5f */ NOSUPPORT,
4280 +
4281 +/* assign NOSUPPORT for 0x60 - 0x9f */
4282 + /* 0x60 - 0x6f */
4283 + NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT,
4284 + NOSUPPORT, NOSUPPORT,
4285 + NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT,
4286 + NOSUPPORT, NOSUPPORT,
4287 + /* 0x70 - 0x7f */
4288 + NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT,
4289 + NOSUPPORT, NOSUPPORT,
4290 + NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT,
4291 + NOSUPPORT, NOSUPPORT,
4292 + /* 0x80 - 0x8f */
4293 + NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT,
4294 + NOSUPPORT, NOSUPPORT,
4295 + NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT,
4296 + NOSUPPORT, NOSUPPORT,
4297 + /* 0x90 - 0x9f */
4298 + NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT,
4299 + NOSUPPORT, NOSUPPORT,
4300 + NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT,
4301 + NOSUPPORT, NOSUPPORT,
4302 +
4303 +/* 0xa0 */ NOSUPPORT,
4304 +/* 0xa1 */ NOSUPPORT,
4305 +/* 0xa2 */ NOSUPPORT,
4306 +/* 0xa3 */ NOSUPPORT,
4307 +/* 0xa4 */ NOSUPPORT,
4308 +
4309 +/* MOVE_MEDIUM 0xa5 */ NODATA,
4310 +
4311 +/* 0xa6 */ NOSUPPORT,
4312 +/* 0xa7 */ NOSUPPORT,
4313 +
4314 +/* READ_12 0xa8 */ DATAIN,
4315 +
4316 +/* 0xa9 */ NOSUPPORT,
4317 +
4318 +/* WRITE_12 0xaa */ DATAOUT,
4319 +
4320 +/* 0xab */ NOSUPPORT,
4321 +/* 0xac */ NOSUPPORT,
4322 +/* 0xad */ NOSUPPORT,
4323 +
4324 +/* WRITE_VERIFY_12 0xae */ DATAOUT,
4325 +
4326 +/* 0xaf */ NOSUPPORT,
4327 +
4328 +/* SEARCH_HIGH_12 0xb0 */ DATAOUT,
4329 +/* SEARCH_EQUAL_12 0xb1 */ DATAOUT,
4330 +/* SEARCH_LOW_12 0xb2 */ DATAOUT,
4331 +
4332 +/* 0xb3 */ NOSUPPORT,
4333 +/* 0xb4 */ NOSUPPORT,
4334 +/* 0xb5 */ NOSUPPORT,
4335 +
4336 +/* SEND_VOLUME_TAG 0xb6 */ DATAOUT,
4337 +
4338 +/* 0xb7 */ NOSUPPORT,
4339 +
4340 +/* READ_ELEMENT_STATUS 0xb8 */ DATAIN,
4341 +
4342 +/* 0xb9 */ NOSUPPORT,
4343 +/* 0xba */ NOSUPPORT,
4344 +/* 0xbb */ DATAOUT,
4345 +/* 0xbc */ NOSUPPORT,
4346 +/* 0xbd */ DATAIN,
4347 +/* 0xbe */ NOSUPPORT,
4348 +/* 0xbf */ NOSUPPORT,
4349 +
4350 +/* assign NOSUPPORT for 0xc0 - 0xdf */
4351 + /* 0xc0 - 0xcf */
4352 + NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT,
4353 + NOSUPPORT, NOSUPPORT,
4354 + NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT,
4355 + NOSUPPORT, NOSUPPORT,
4356 + /* 0xd0 - 0xdf */
4357 + NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT,
4358 + NOSUPPORT, NOSUPPORT,
4359 + NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT, NOSUPPORT,
4360 + NOSUPPORT, NOSUPPORT,
4361 +
4362 +/* 0xe0 */ NOSUPPORT,
4363 +/* 0xe1 */ NOSUPPORT,
4364 +/* 0xe2 */ NOSUPPORT,
4365 +/* 0xe3 */ NOSUPPORT,
4366 +/* 0xe4 */ NOSUPPORT,
4367 +/* 0xe5 */ NOSUPPORT,
4368 +/* 0xe6 */ NOSUPPORT,
4369 +/* 0xe7 */ NOSUPPORT,
4370 +/* 0xe8 */ NOSUPPORT,
4371 +/* 0xe9 */ NOSUPPORT,
4372 +
4373 +/* WRITE_LONG_2 0xea */ DATAOUT
4374 +};
4375 +
4376 +#define DISKXFERTBLSIZE sizeof(i2oscsi2diskxfer)
