/*
 *
 *
 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like. Any license provided herein, whether implied or
 * otherwise, applies only to this software file. Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/NoticeExplan
 */

#ifndef _ASM_IA64_SN_BTE_H
#define _ASM_IA64_SN_BTE_H

#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <asm/sn/io.h>
#include <asm/delay.h>

/* #define BTE_DEBUG */
/* #define BTE_DEBUG_VERBOSE */

#ifdef BTE_DEBUG
# define BTE_PRINTK(x)	printk x	/* Terse */
# ifdef BTE_DEBUG_VERBOSE
#  define BTE_PRINTKV(x) printk x	/* Verbose */
# else
#  define BTE_PRINTKV(x)
# endif	/* BTE_DEBUG_VERBOSE */
#else
# define BTE_PRINTK(x)
# define BTE_PRINTKV(x)
#endif	/* BTE_DEBUG */
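
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * BTE_PRINTK() pastes its single argument directly after printk, so
 * callers wrap the whole printk argument list in an extra set of
 * parentheses, e.g.
 *
 *	BTE_PRINTK(("bte: transfer queued\n"));
 *	BTE_PRINTKV(("bte: src 0x%lx dest 0x%lx len 0x%lx\n", src, dest, len));
 */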

#ifndef L1_CACHE_MASK
#define L1_CACHE_MASK (L1_CACHE_BYTES - 1)
#endif

#ifndef L1_CACHE_ALIGNED
#define L1_CACHE_ALIGNED(_p) (((u64)(_p) & L1_CACHE_MASK) == 0)
#endif
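
/*
 * Example (editorial sketch): L1_CACHE_ALIGNED() just tests the low
 * address bits against the cache line mask.  Assuming the 128-byte
 * lines used on these systems (L1_CACHE_BYTES == 128):
 *
 *	L1_CACHE_ALIGNED(0x1000)	-> 1, address is on a line boundary
 *	L1_CACHE_ALIGNED(0x1040)	-> 0, address is 64 bytes into a line
 */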

/* BTE status register only supports 16 bits for length field */
#define BTE_LEN_BITS (16)
#define BTE_LEN_MASK ((1 << BTE_LEN_BITS) - 1)
#define BTE_MAX_XFER ((1 << BTE_LEN_BITS) * L1_CACHE_BYTES)
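
/*
 * Worked example (editorial note): the length field counts cache lines,
 * not bytes, so assuming the same 128-byte lines:
 *
 *	BTE_MAX_XFER = (1 << 16) * 128 = 65536 * 128 = 8 MB per transfer
 */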

/* Hardware: number of block transfer engines per node */
#define BTES_PER_NODE 2

/* Define hardware modes */
#define BTE_NOTIFY (IBCT_NOTIFY)
#define BTE_NORMAL BTE_NOTIFY
#define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE)
/* Use a reserved bit to let the caller specify a wait for any BTE */
#define BTE_WACQUIRE (0x4000)
/* Mask a mode down to the bits that are valid in the IBCT0 register */
#define BTE_VALID_MODE(x) ((x) & (IBCT_NOTIFY | IBCT_ZFIL_MODE))
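
/*
 * Example (editorial sketch): a caller wanting a zero-fill transfer that
 * is willing to wait for whichever BTE frees up first might use
 *
 *	u64 mode = BTE_ZERO_FILL | BTE_WACQUIRE;
 *
 * BTE_VALID_MODE(mode) then strips the software-only BTE_WACQUIRE bit,
 * leaving just IBCT_NOTIFY | IBCT_ZFIL_MODE for the IBCT0 register.
 */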

/*
 * Handle locking of the bte interfaces.
 *
 * All transfers spinlock the interface before setting up the SHUB
 * registers. Sync transfers hold the lock until all processing is
 * complete. Async transfers release the lock as soon as the transfer
 * is initiated.
 *
 * To determine if an interface is available, we must check both the
 * busy bit and the spinlock for that interface.
 */
#define BTE_LOCK_IF_AVAIL(_x) (\
	(*pda.cpu_bte_if[_x]->most_rcnt_na & (IBLS_BUSY | IBLS_ERROR)) && \
	(!(spin_trylock(&(pda.cpu_bte_if[_x]->spinlock)))) \
	)
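
/*
 * Sketch of the locking discipline described above (editorial
 * illustration, not code from bte.c); "bte" is an assumed local
 * struct bteinfo_s pointer:
 *
 *	spin_lock(&bte->spinlock);
 *	... program the SHUB registers to start the transfer ...
 *	if (synchronous) {
 *		while (*bte->most_rcnt_na & IBLS_BUSY)
 *			cpu_relax();
 *		spin_unlock(&bte->spinlock);
 *	} else {
 *		spin_unlock(&bte->spinlock);	(completion is checked later)
 *	}
 */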

/*
 * Some macros to simplify reading.
 * Start with macros to locate the BTE control registers.  These expand
 * relative to a local pointer named "bte" (a struct bteinfo_s *), which
 * must be in scope wherever they are used.
 */
#define BTEREG_LNSTAT_ADDR ((u64 *)(bte->bte_base_addr))
#define BTEREG_SRC_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_SRC))
#define BTEREG_DEST_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_DEST))
#define BTEREG_CTRL_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_CTRL))
#define BTEREG_NOTIF_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_NOTIFY))
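
/*
 * Rough programming sequence through these registers (editorial sketch;
 * the authoritative sequence is in bte.c).  "src", "dest", "notify_pa"
 * and "num_lines" are assumed locals holding physical addresses and a
 * cache line count:
 *
 *	*BTEREG_LNSTAT_ADDR = IBLS_BUSY | num_lines;
 *	*BTEREG_SRC_ADDR    = src;
 *	*BTEREG_DEST_ADDR   = dest;
 *	*BTEREG_NOTIF_ADDR  = notify_pa;
 *	*BTEREG_CTRL_ADDR   = BTE_VALID_MODE(mode);	(this write starts the copy)
 */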

/* Possible results from bte_copy and bte_unaligned_copy */
typedef enum {
	BTE_SUCCESS,		/* 0 is success */
	BTEFAIL_NOTAVAIL,	/* BTE not available */
	BTEFAIL_POISON,		/* poison page */
	BTEFAIL_PROT,		/* Protection violation */
	BTEFAIL_ACCESS,		/* access error */
	BTEFAIL_TOUT,		/* Time out */
	BTEFAIL_XTERR,		/* Xtalk error */
	BTEFAIL_DIR,		/* Directory error */
	BTEFAIL_ERROR,		/* Generic error */
} bte_result_t;
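
/*
 * Example (editorial sketch) of acting on a result code; "rv" is assumed
 * to hold the return value of one of the copy routines declared below:
 *
 *	switch (rv) {
 *	case BTE_SUCCESS:
 *		break;
 *	case BTEFAIL_NOTAVAIL:
 *		... no interface was free, the caller may retry ...
 *		break;
 *	default:
 *		printk(KERN_ERR "bte: transfer failed, error %d\n", rv);
 *	}
 */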

/*
 * Structure defining a bte.  An instance of this
 * structure is created in the nodepda for each
 * bte on that node (as defined by BTES_PER_NODE).
 * This structure contains everything necessary
 * to work with a BTE.
 */
struct bteinfo_s {
	u64 volatile notify ____cacheline_aligned;	/* Notification word the BTE updates */
	char *bte_base_addr ____cacheline_aligned;	/* Base of this BTE's control registers */
	spinlock_t spinlock;				/* Serializes use of this interface */
	cnodeid_t bte_cnode;				/* cnode */
	int bte_error_count;				/* Number of errors encountered */
	int bte_num;					/* 0 --> BTE0, 1 --> BTE1 */
	int cleanup_active;				/* Interface is locked for cleanup */
	volatile bte_result_t bh_error;			/* error while processing */
	u64 volatile *most_rcnt_na;			/* Most recent notification address */
	void *scratch_buf;				/* Node local scratch buffer */
};
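
/*
 * For reference (editorial note; the field name below is an assumption,
 * check the nodepda definition): the per-node PDA carries one of these
 * per engine, roughly
 *
 *	struct bteinfo_s bte_if[BTES_PER_NODE];
 *
 * and per-cpu code reaches its node's interfaces through pointers such
 * as the pda.cpu_bte_if[] array used by BTE_LOCK_IF_AVAIL() above.
 */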

/*
 * Function prototypes (functions defined in bte.c, used elsewhere)
 */
extern bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification);
extern bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode);
extern void bte_error_handler(unsigned long);
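
/*
 * Example (editorial sketch): a fully cache line aligned, synchronous
 * copy of one page between two physical addresses.  A NULL notification
 * pointer makes bte_copy() block until the transfer completes:
 *
 *	bte_result_t rv;
 *
 *	rv = bte_copy(src, dest, PAGE_SIZE, BTE_NORMAL, NULL);
 *	if (rv != BTE_SUCCESS)
 *		return rv;
 */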

/*
 * The following is the preferred way of calling bte_unaligned_copy.
 * If the copy is fully cache line aligned, then bte_copy is used
 * instead, which avoids the overhead of the unaligned (scratch buffer)
 * path.  NOTE: bte_copy is called synchronously and does block until
 * the transfer is complete.  In order to get the async version of
 * bte_copy, you must perform this check yourself.
 */
#define BTE_UNALIGNED_COPY(src, dest, len, mode) \
	((((len) & L1_CACHE_MASK) || ((src) & L1_CACHE_MASK) || \
	  ((dest) & L1_CACHE_MASK)) ? \
	 bte_unaligned_copy((src), (dest), (len), (mode)) : \
	 bte_copy((src), (dest), (len), (mode), NULL))
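
/*
 * Usage sketch (editorial illustration): callers that cannot guarantee
 * cache line alignment of src, dest and len just go through the macro
 * and let it pick the appropriate routine:
 *
 *	if (BTE_UNALIGNED_COPY(src, dest, nbytes, BTE_NORMAL) != BTE_SUCCESS)
 *		... fall back to a cpu copy or report the error ...
 */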

#endif /* _ASM_IA64_SN_BTE_H */