/*
 *  include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
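
/*
 * Illustrative sketch (editor's note, not part of the original header):
 * under the ordering above, set_bit(nr, addr) is logically equivalent to
 *
 *     ((unsigned long *) addr)[nr >> 6] |= 1UL << (nr & 63);
 *
 * and, because s390x is big endian, the byte that holds bit nr is
 *
 *     ((unsigned char *) addr)[(nr >> 3) ^ 7];
 *
 * which is why the byte-oriented routines below index with (nr>>3)^7.
 */
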
#include <linux/config.h>

/* set ALIGN_CS to 1 if the SMP safe bit operations should
 * align the address to an 8 byte boundary. It seems to work
 * without the alignment.
 */
#ifdef __KERNEL__
#define ALIGN_CS 0
#else
#define ALIGN_CS 1
#ifndef CONFIG_SMP
#error "bitops won't work without CONFIG_SMP"
#endif
#endif
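
/*
 * Sketch of the ALIGN_CS fold (illustrative): the address is rounded
 * down to an 8 byte boundary and the skipped bytes are folded into
 * the bit number, 8 bits per byte:
 *
 *     unsigned long off = (unsigned long) addr & 7;
 *     addr = (void *) ((unsigned long) addr - off);
 *     nr  += off << 3;
 */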

/* bitmap tables from arch/S390/kernel/bitmap.S */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];

#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static __inline__ void set_bit_cs(unsigned long nr, volatile void * addr)
{
        unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lghi  %2,7\n"         /* CSG must be aligned on 8 byte b. */
             "   ngr   %2,%1\n"        /* isolate last 3 bits of address */
             "   xgr   %1,%2\n"        /* make addr % 8 == 0 */
             "   sllg  %2,%2,3\n"
             "   agr   %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lghi  %2,63\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srlg  %0,%0,3\n"
             "   lghi  %3,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sllg  %3,%3,0(%2)\n"  /* make OR mask */
             "   lg    %0,0(%1)\n"
             "0: lgr   %2,%0\n"        /* CS loop starts here */
             "   ogr   %2,%3\n"        /* set bit */
             "   csg   %0,%2,0(%1)\n"
             "   jl    0b"
             : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
             : "cc", "memory" );
}
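
/*
 * C-like sketch of the CSG loop above (illustrative; compare_and_swap64
 * is a hypothetical helper standing in for the csg instruction):
 *
 *     unsigned long old, new;
 *     old = *p;
 *     do {
 *         new = old | mask;
 *     } while (!compare_and_swap64(p, &old, new));
 *
 * The clear and change variants below differ only in building the new
 * value with "old & mask" and "old ^ mask" respectively.
 */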

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static __inline__ void clear_bit_cs(unsigned long nr, volatile void * addr)
{
        unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lghi  %2,7\n"         /* CSG must be aligned on 8 byte b. */
             "   ngr   %2,%1\n"        /* isolate last 3 bits of address */
             "   xgr   %1,%2\n"        /* make addr % 8 == 0 */
             "   sllg  %2,%2,3\n"
             "   agr   %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lghi  %2,63\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srlg  %0,%0,3\n"
             "   lghi  %3,-2\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   rllg  %3,%3,0(%2)\n"  /* make AND mask */
             "   lg    %0,0(%1)\n"
             "0: lgr   %2,%0\n"        /* CS loop starts here */
             "   ngr   %2,%3\n"        /* clear bit */
             "   csg   %0,%2,0(%1)\n"
             "   jl    0b"
             : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
             : "cc", "memory" );
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static __inline__ void change_bit_cs(unsigned long nr, volatile void * addr)
{
        unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lghi  %2,7\n"         /* CSG must be aligned on 8 byte b. */
             "   ngr   %2,%1\n"        /* isolate last 3 bits of address */
             "   xgr   %1,%2\n"        /* make addr % 8 == 0 */
             "   sllg  %2,%2,3\n"
             "   agr   %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lghi  %2,63\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srlg  %0,%0,3\n"
             "   lghi  %3,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sllg  %3,%3,0(%2)\n"  /* make XOR mask */
             "   lg    %0,0(%1)\n"
             "0: lgr   %2,%0\n"        /* CS loop starts here */
             "   xgr   %2,%3\n"        /* change bit */
             "   csg   %0,%2,0(%1)\n"
             "   jl    0b"
             : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
             : "cc", "memory" );
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static __inline__ int
test_and_set_bit_cs(unsigned long nr, volatile void * addr)
{
        unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lghi  %2,7\n"         /* CSG must be aligned on 8 byte b. */
             "   ngr   %2,%1\n"        /* isolate last 3 bits of address */
             "   xgr   %1,%2\n"        /* make addr % 8 == 0 */
             "   sllg  %2,%2,3\n"
             "   agr   %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lghi  %2,63\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srlg  %0,%0,3\n"
             "   lghi  %3,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sllg  %3,%3,0(%2)\n"  /* make OR mask */
             "   lg    %0,0(%1)\n"
             "0: lgr   %2,%0\n"        /* CS loop starts here */
             "   ogr   %2,%3\n"        /* set bit */
             "   csg   %0,%2,0(%1)\n"
             "   jl    0b\n"
             "   ngr   %0,%3\n"        /* isolate old bit */
             : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
             : "cc", "memory" );
        return nr != 0;
}
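
/*
 * Usage sketch (illustrative; BUSY_BIT and flags are hypothetical
 * names): the test_and_* routines return nonzero iff the bit was
 * already set before the operation, so they can arbitrate a race:
 *
 *     if (!test_and_set_bit(BUSY_BIT, &flags)) {
 *             ... we set the bit first and own the resource ...
 *     }
 */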

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static __inline__ int
test_and_clear_bit_cs(unsigned long nr, volatile void * addr)
{
        unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lghi  %2,7\n"         /* CSG must be aligned on 8 byte b. */
             "   ngr   %2,%1\n"        /* isolate last 3 bits of address */
             "   xgr   %1,%2\n"        /* make addr % 8 == 0 */
             "   sllg  %2,%2,3\n"
             "   agr   %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lghi  %2,63\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srlg  %0,%0,3\n"
             "   lghi  %3,-2\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   rllg  %3,%3,0(%2)\n"  /* make AND mask */
             "   lg    %0,0(%1)\n"
             "0: lgr   %2,%0\n"        /* CS loop starts here */
             "   ngr   %2,%3\n"        /* clear bit */
             "   csg   %0,%2,0(%1)\n"
             "   jl    0b\n"
             "   xgr   %0,%2\n"        /* isolate old bit */
             : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
             : "cc", "memory" );
        return nr != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static __inline__ int
test_and_change_bit_cs(unsigned long nr, volatile void * addr)
{
        unsigned long bits, mask;
        __asm__ __volatile__(
#if ALIGN_CS == 1
             "   lghi  %2,7\n"         /* CSG must be aligned on 8 byte b. */
             "   ngr   %2,%1\n"        /* isolate last 3 bits of address */
             "   xgr   %1,%2\n"        /* make addr % 8 == 0 */
             "   sllg  %2,%2,3\n"
             "   agr   %0,%2\n"        /* add alignment to bitnr */
#endif
             "   lghi  %2,63\n"
             "   nr    %2,%0\n"        /* make shift value */
             "   xr    %0,%2\n"
             "   srlg  %0,%0,3\n"
             "   lghi  %3,1\n"
             "   la    %1,0(%0,%1)\n"  /* calc. address for CS */
             "   sllg  %3,%3,0(%2)\n"  /* make XOR mask */
             "   lg    %0,0(%1)\n"
             "0: lgr   %2,%0\n"        /* CS loop starts here */
             "   xgr   %2,%3\n"        /* change bit */
             "   csg   %0,%2,0(%1)\n"
             "   jl    0b\n"
             "   ngr   %0,%3\n"        /* isolate old bit */
             : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
             : "cc", "memory" );
        return nr != 0;
}
#endif /* CONFIG_SMP */

/*
 * fast, non-SMP set_bit routine
 */
static __inline__ void __set_bit(unsigned long nr, volatile void * addr)
{
        unsigned long reg1, reg2;
        __asm__ __volatile__(
             "   lghi  %1,56\n"
             "   lghi  %0,7\n"
             "   xgr   %1,%2\n"
             "   nr    %0,%2\n"
             "   srlg  %1,%1,3\n"
             "   la    %1,0(%1,%3)\n"
             "   la    %0,0(%0,%4)\n"
             "   oc    0(1,%1),0(%0)"
             : "=&a" (reg1), "=&a" (reg2)
             : "a" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
}
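
/*
 * Roughly equivalent C for __set_bit (illustrative; _oi_bitmap[i]
 * holds the single-bit mask 1 << i, matching the constant cases
 * below):
 *
 *     ((volatile char *) addr)[(nr >> 3) ^ 7] |= _oi_bitmap[nr & 7];
 */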

static __inline__ void
__constant_set_bit(const unsigned long nr, volatile void * addr)
{
  switch (nr&7) {
  case 0:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x01"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 1:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x02"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 2:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x04"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 3:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x08"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 4:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x10"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 5:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x20"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 6:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x40"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 7:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "oi 0(1),0x80"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  }
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )
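
/*
 * Dispatch sketch (illustrative): __builtin_constant_p lets the
 * compiler pick the immediate form at compile time:
 *
 *     set_bit_simple(3, &flags);   ... constant nr: an oi instruction
 *                                      with immediate mask 0x08
 *     set_bit_simple(n, &flags);   ... variable nr: table-driven
 *                                      __set_bit(n, &flags)
 */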

/*
 * fast, non-SMP clear_bit routine
 */
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
{
        unsigned long reg1, reg2;
        __asm__ __volatile__(
             "   lghi  %1,56\n"
             "   lghi  %0,7\n"
             "   xgr   %1,%2\n"
             "   nr    %0,%2\n"
             "   srlg  %1,%1,3\n"
             "   la    %1,0(%1,%3)\n"
             "   la    %0,0(%0,%4)\n"
             "   nc    0(1,%1),0(%0)"
             : "=&a" (reg1), "=&a" (reg2)
             : "d" (nr), "a" (addr), "a" (&_ni_bitmap) : "cc", "memory" );
}

static __inline__ void
__constant_clear_bit(const unsigned long nr, volatile void * addr)
{
  switch (nr&7) {
  case 0:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xFE"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 1:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xFD"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 2:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xFB"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 3:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xF7"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 4:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xEF"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 5:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xDF"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 6:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0xBF"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 7:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "ni 0(1),0x7F"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  }
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static __inline__ void __change_bit(unsigned long nr, volatile void * addr)
{
        unsigned long reg1, reg2;
        __asm__ __volatile__(
             "   lghi  %1,56\n"
             "   lghi  %0,7\n"
             "   xgr   %1,%2\n"
             "   nr    %0,%2\n"
             "   srlg  %1,%1,3\n"
             "   la    %1,0(%1,%3)\n"
             "   la    %0,0(%0,%4)\n"
             "   xc    0(1,%1),0(%0)"
             : "=&a" (reg1), "=&a" (reg2)
             : "d" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
}

static __inline__ void
__constant_change_bit(const unsigned long nr, volatile void * addr)
{
  switch (nr&7) {
  case 0:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x01"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 1:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x02"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 2:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x04"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 3:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x08"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 4:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x10"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 5:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x20"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 6:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x40"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  case 7:
    __asm__ __volatile__ ("la 1,%0\n\t"
                          "xi 0(1),0x80"
                          : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
                          : : "1", "cc", "memory" );
    break;
  }
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )

/*
 * fast, non-SMP test_and_set_bit routine
 */
static __inline__ int
test_and_set_bit_simple(unsigned long nr, volatile void * addr)
{
        unsigned long reg1, reg2;
        int oldbit;
        __asm__ __volatile__(
             "   lghi  %1,56\n"
             "   lghi  %2,7\n"
             "   xgr   %1,%3\n"
             "   nr    %2,%3\n"
             "   srlg  %1,%1,3\n"
             "   la    %1,0(%1,%4)\n"
             "   ic    %0,0(%1)\n"
             "   srl   %0,0(%2)\n"
             "   la    %2,0(%2,%5)\n"
             "   oc    0(1,%1),0(%2)"
             : "=&d" (oldbit), "=&a" (reg1), "=&a" (reg2)
             : "d" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
        return oldbit & 1;
}
#define __test_and_set_bit(X,Y)         test_and_set_bit_simple(X,Y)
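
/*
 * Roughly equivalent C for the three test_and_*_simple routines
 * (illustrative, shown for the set case):
 *
 *     volatile char *b = (volatile char *) addr + ((nr >> 3) ^ 7);
 *     int old = (*b >> (nr & 7)) & 1;
 *     *b |= _oi_bitmap[nr & 7];
 *     return old;
 */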

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static __inline__ int
test_and_clear_bit_simple(unsigned long nr, volatile void * addr)
{
        unsigned long reg1, reg2;
        int oldbit;

        __asm__ __volatile__(
             "   lghi  %1,56\n"
             "   lghi  %2,7\n"
             "   xgr   %1,%3\n"
             "   nr    %2,%3\n"
             "   srlg  %1,%1,3\n"
             "   la    %1,0(%1,%4)\n"
             "   ic    %0,0(%1)\n"
             "   srl   %0,0(%2)\n"
             "   la    %2,0(%2,%5)\n"
             "   nc    0(1,%1),0(%2)"
             : "=&d" (oldbit), "=&a" (reg1), "=&a" (reg2)
             : "d" (nr), "a" (addr), "a" (&_ni_bitmap) : "cc", "memory" );
        return oldbit & 1;
}
#define __test_and_clear_bit(X,Y)       test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static __inline__ int
test_and_change_bit_simple(unsigned long nr, volatile void * addr)
{
        unsigned long reg1, reg2;
        int oldbit;

        __asm__ __volatile__(
             "   lghi  %1,56\n"
             "   lghi  %2,7\n"
             "   xgr   %1,%3\n"
             "   nr    %2,%3\n"
             "   srlg  %1,%1,3\n"
             "   la    %1,0(%1,%4)\n"
             "   ic    %0,0(%1)\n"
             "   srl   %0,0(%2)\n"
             "   la    %2,0(%2,%5)\n"
             "   xc    0(1,%1),0(%2)"
             : "=&d" (oldbit), "=&a" (reg1), "=&a" (reg2)
             : "d" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
        return oldbit & 1;
}
#define __test_and_change_bit(X,Y)      test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif


/*
 * This routine doesn't need to be atomic.
 */

static __inline__ int __test_bit(unsigned long nr, volatile void * addr)
{
        unsigned long reg1, reg2;
        int oldbit;

        __asm__ __volatile__(
             "   lghi  %2,56\n"
             "   lghi  %1,7\n"
             "   xgr   %2,%3\n"
             "   nr    %1,%3\n"
             "   srlg  %2,%2,3\n"
             "   ic    %0,0(%2,%4)\n"
             "   srl   %0,0(%1)\n"
             : "=&d" (oldbit), "=&a" (reg1), "=&a" (reg2)
             : "d" (nr), "a" (addr) : "cc" );
        return oldbit & 1;
}

static __inline__ int
__constant_test_bit(unsigned long nr, volatile void * addr) {
    return (((volatile char *) addr)[(nr>>3)^7] & (1<<(nr&7))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )
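
/*
 * Consistency sketch (illustrative): test_bit reads through the same
 * (nr>>3)^7 byte mapping the setters use, e.g.
 *
 *     unsigned long flags = 0;
 *     set_bit(12, &flags);
 *     ... test_bit(12, &flags) != 0; byte 6 of flags now holds 0x10
 */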

/*
 * Find-bit routines..
 */
static __inline__ unsigned long
find_first_zero_bit(void * addr, unsigned long size)
{
        unsigned long res, cmp, count;

        if (!size)
                return 0;
        __asm__("   lghi  %1,-1\n"
                "   lgr   %2,%3\n"
                "   slgr  %0,%0\n"
                "   aghi  %2,63\n"
                "   srlg  %2,%2,6\n"
                "0: cg    %1,0(%0,%4)\n"
                "   jne   1f\n"
                "   aghi  %0,8\n"
                "   brct  %2,0b\n"
                "   lgr   %0,%3\n"
                "   j     5f\n"
                "1: lg    %2,0(%0,%4)\n"
                "   sllg  %0,%0,3\n"
                "   clr   %2,%1\n"
                "   jne   2f\n"
                "   aghi  %0,32\n"
                "   srlg  %2,%2,32\n"
                "2: lghi  %1,0xff\n"
                "   tmll  %2,0xffff\n"
                "   jno   3f\n"
                "   aghi  %0,16\n"
                "   srl   %2,16\n"
                "3: tmll  %2,0x00ff\n"
                "   jno   4f\n"
                "   aghi  %0,8\n"
                "   srl   %2,8\n"
                "4: ngr   %2,%1\n"
                "   ic    %2,0(%2,%5)\n"
                "   algr  %0,%2\n"
                "5:"
                : "=&a" (res), "=&d" (cmp), "=&a" (count)
                : "a" (size), "a" (addr), "a" (&_zb_findmap) : "cc" );
        return (res < size) ? res : size;
}
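
/*
 * Example (illustrative): with map[0] == 0x07UL (bits 0-2 set) and
 * everything else zero, find_first_zero_bit(map, 64) returns 3.
 * If the range contains no zero bit, size is returned.
 */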

static __inline__ unsigned long
find_next_zero_bit (void * addr, unsigned long size, unsigned long offset)
{
        unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
        unsigned long bitvec, reg;
        unsigned long set, bit = offset & 63, res;

        if (bit) {
                /*
                 * Look for zero in first word
                 */
                bitvec = (*p) >> bit;
                __asm__("   lhi  %2,-1\n"
                        "   slgr %0,%0\n"
                        "   clr  %1,%2\n"
                        "   jne  0f\n"
                        "   aghi %0,32\n"
                        "   srlg %1,%1,32\n"
                        "0: lghi %2,0xff\n"
                        "   tmll %1,0xffff\n"
                        "   jno  1f\n"
                        "   aghi %0,16\n"
                        "   srlg %1,%1,16\n"
                        "1: tmll %1,0x00ff\n"
                        "   jno  2f\n"
                        "   aghi %0,8\n"
                        "   srlg %1,%1,8\n"
                        "2: ngr  %1,%2\n"
                        "   ic   %1,0(%1,%3)\n"
                        "   algr %0,%1"
                        : "=&d" (set), "+a" (bitvec), "=&d" (reg)
                        : "a" (&_zb_findmap) : "cc" );
                if (set < (64 - bit))
                        return set + offset;
                offset += 64 - bit;
                p++;
        }
        /*
         * No zero yet, search remaining full words for a zero
         */
        res = find_first_zero_bit (p, size - 64 * (p - (unsigned long *) addr));
        return (offset + res);
}

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
static __inline__ unsigned long ffz(unsigned long word)
{
        unsigned long reg;
        int result;

        __asm__("   lhi  %2,-1\n"
                "   slgr %0,%0\n"
                "   clr  %1,%2\n"
                "   jne  0f\n"
                "   aghi %0,32\n"
                "   srlg %1,%1,32\n"
                "0: lghi %2,0xff\n"
                "   tmll %1,0xffff\n"
                "   jno  1f\n"
                "   aghi %0,16\n"
                "   srlg %1,%1,16\n"
                "1: tmll %1,0x00ff\n"
                "   jno  2f\n"
                "   aghi %0,8\n"
                "   srlg %1,%1,8\n"
                "2: ngr  %1,%2\n"
                "   ic   %1,0(%1,%3)\n"
                "   algr %0,%1"
                : "=&d" (result), "+a" (word), "=&d" (reg)
                : "a" (&_zb_findmap) : "cc" );
        return result;
}
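
/*
 * Illustrative examples: ffz(0x00ffUL) == 8 and ffz(0UL) == 0;
 * ffz(~0UL) is undefined, hence the ~0UL caveat above.
 */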

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */

extern int __inline__ ffs (int x)
{
        int r;

        if (x == 0)
          return 0;
        __asm__("    slr  %0,%0\n"
                "    tml  %1,0xffff\n"
                "    jnz  0f\n"
                "    ahi  %0,16\n"
                "    srl  %1,16\n"
                "0:  tml  %1,0x00ff\n"
                "    jnz  1f\n"
                "    ahi  %0,8\n"
                "    srl  %1,8\n"
                "1:  tml  %1,0x000f\n"
                "    jnz  2f\n"
                "    ahi  %0,4\n"
                "    srl  %1,4\n"
                "2:  tml  %1,0x0003\n"
                "    jnz  3f\n"
                "    ahi  %0,2\n"
                "    srl  %1,2\n"
                "3:  tml  %1,0x0001\n"
                "    jnz  4f\n"
                "    ahi  %0,1\n"
                "4:"
                : "=&d" (r), "+d" (x) : : "cc" );
        return r+1;
}
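
/*
 * Illustrative examples (1-based, matching libc ffs):
 *     ffs(0) == 0, ffs(1) == 1, ffs(0x8000) == 16.
 */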

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
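
/*
 * Illustrative example: hweight8(0xF1) == 5 (bits 0 and 4-7 set).
 * The generic_hweight* helpers are the portable fallbacks from
 * <linux/bitops.h>.
 */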


#ifdef __KERNEL__

/*
 * ATTENTION: Intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */

#define ext2_set_bit(nr, addr)       test_and_set_bit((nr)^56, addr)
#define ext2_clear_bit(nr, addr)     test_and_clear_bit((nr)^56, addr)
#define ext2_test_bit(nr, addr)      test_bit((nr)^56, addr)
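
/*
 * Why (nr)^56 (illustrative): the byte index within a 64-bit word
 * occupies bits 3-5 of the bit number, and 56 == 0b111000, so
 * nr ^ 56 flips the byte index (k -> 7-k). That remaps the
 * little-endian ext2 bit numbering onto the native big-endian
 * numbering used by set_bit/test_bit above, e.g.
 *
 *     ext2 bit 0  -> native bit 56  (first byte in memory, LSB)
 *     ext2 bit 63 -> native bit 7   (eighth byte in memory, MSB)
 */
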
static __inline__ unsigned long
ext2_find_first_zero_bit(void *vaddr, unsigned long size)
{
        unsigned long res, cmp, count;

        if (!size)
                return 0;
        __asm__("   lghi  %1,-1\n"
                "   lgr   %2,%3\n"
                "   aghi  %2,63\n"
                "   srlg  %2,%2,6\n"
                "   slgr  %0,%0\n"
                "0: clg   %1,0(%0,%4)\n"
                "   jne   1f\n"
                "   aghi  %0,8\n"
                "   brct  %2,0b\n"
                "   lgr   %0,%3\n"
                "   j     5f\n"
                "1: cl    %1,0(%0,%4)\n"
                "   jne   2f\n"
                "   aghi  %0,4\n"
                "2: l     %2,0(%0,%4)\n"
                "   sllg  %0,%0,3\n"
                "   aghi  %0,24\n"
                "   lghi  %1,0xff\n"
                "   tmlh  %2,0xffff\n"
                "   jo    3f\n"
                "   aghi  %0,-16\n"
                "   srl   %2,16\n"
                "3: tmll  %2,0xff00\n"
                "   jo    4f\n"
                "   aghi  %0,-8\n"
                "   srl   %2,8\n"
                "4: ngr   %2,%1\n"
                "   ic    %2,0(%2,%5)\n"
                "   algr  %0,%2\n"
                "5:"
                : "=&a" (res), "=&d" (cmp), "=&a" (count)
                : "a" (size), "a" (vaddr), "a" (&_zb_findmap) : "cc" );
        return (res < size) ? res : size;
}

static __inline__ unsigned long
ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
{
        unsigned long *addr = vaddr;
        unsigned long *p = addr + (offset >> 6);
        unsigned long word, reg;
        unsigned long bit = offset & 63UL, res;

        if (offset >= size)
                return size;

        if (bit) {
                __asm__("   lrvg %0,%1" /* load reversed, neat instruction */
                        : "=a" (word) : "m" (*p) );
                word >>= bit;
                res = bit;
                /* Look for zero in first 8 byte word */
                __asm__("   lghi %2,0xff\n"
                        "   tmll %1,0xffff\n"
                        "   jno  2f\n"
                        "   ahi  %0,16\n"
                        "   srlg %1,%1,16\n"
                        "0: tmll %1,0xffff\n"
                        "   jno  2f\n"
                        "   ahi  %0,16\n"
                        "   srlg %1,%1,16\n"
                        "1: tmll %1,0xffff\n"
                        "   jno  2f\n"
                        "   ahi  %0,16\n"
                        "   srl  %1,16\n"
                        "2: tmll %1,0x00ff\n"
                        "   jno  3f\n"
                        "   ahi  %0,8\n"
                        "   srl  %1,8\n"
                        "3: ngr  %1,%2\n"
                        "   ic   %1,0(%1,%3)\n"
                        "   alr  %0,%1"
                        : "+&d" (res), "+a" (word), "=&d" (reg)
                        : "a" (&_zb_findmap) : "cc" );
                if (res < 64)
                        return (p - addr)*64 + res;
                p++;
        }
        /* No zero yet, search remaining full bytes for a zero */
        res = ext2_find_first_zero_bit (p, size - 64 * (p - addr));
        return (p - addr) * 64 + res;
}

/* Bitmap functions for the minix filesystem.  */
/* FIXME !!! */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _S390_BITOPS_H */
