OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [include/] [asm-m68k/] [string.h] - Blame information for rev 1774

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1276 phoenix
#ifndef _M68K_STRING_H_
2
#define _M68K_STRING_H_
3
 
4
#include <asm/setup.h>
5
#include <asm/page.h>
6
 
7
#define __HAVE_ARCH_STRCPY
/*
 * Copy the NUL-terminated string src into dest, including the
 * terminator, and return dest.
 *
 * The asm loop copies one byte per iteration with post-increment
 * addressing; moveb sets the condition codes from the byte just
 * moved, so "jne 1b" loops until the NUL has been stored.
 */
static inline char * strcpy(char * dest,const char *src)
{
  char *xdest = dest;   /* remember the original dest for the return value */

  __asm__ __volatile__
       ("1:\tmoveb %1@+,%0@+\n\t"   /* *dest++ = *src++; sets Z on NUL */
        "jne 1b"                    /* keep going until the NUL was copied */
        : "=a" (dest), "=a" (src)
        : "0" (dest), "1" (src) : "memory");
  return xdest;
}
19
 
20
#define __HAVE_ARCH_STRNCPY
/*
 * Copy at most n characters from src to dest, stopping early when a
 * NUL has been copied.  Returns dest.
 *
 * NOTE(review): unlike ISO C strncpy(), this version does not
 * zero-pad the remainder of dest when src is shorter than n —
 * confirm callers do not depend on the padding.
 */
static inline char * strncpy(char *dest, const char *src, size_t n)
{
  char *xdest = dest;   /* saved return value */

  if (n == 0)           /* nothing to copy */
    return xdest;

  __asm__ __volatile__
       ("1:\tmoveb %1@+,%0@+\n\t"   /* *dest++ = *src++; sets Z on NUL */
        "jeq 2f\n\t"                /* copied the terminator - done */
        "subql #1,%2\n\t"           /* n-- */
        "jne 1b\n\t"                /* room left, keep copying */
        "2:"
        : "=a" (dest), "=a" (src), "=d" (n)
        : "0" (dest), "1" (src), "2" (n)
        : "memory");
  return xdest;
}
39
 
40
#define __HAVE_ARCH_STRCAT
/*
 * Append the NUL-terminated string src to the end of dest and return
 * dest.  dest must be large enough to hold the result.
 */
static inline char * strcat(char * dest, const char * src)
{
        char *start = dest;
        char *p = dest;

        /* advance to the terminating NUL of dest */
        while (*p != '\0')
                p++;
        /* copy src, including its terminator */
        do {
                *p++ = *src;
        } while (*src++ != '\0');

        return start;
}
52
 
53
#define __HAVE_ARCH_STRNCAT
/*
 * Append at most count characters from src to the end of dest and
 * always NUL-terminate the result.  Returns dest.
 */
static inline char * strncat(char *dest, const char *src, size_t count)
{
        char *ret = dest;

        if (count == 0)
                return ret;

        /* find the end of dest */
        while (*dest != '\0')
                dest++;

        /* copy up to count characters, then terminate */
        for (;;) {
                char ch = *src++;
                *dest++ = ch;
                if (ch == '\0')
                        break;
                if (--count == 0) {
                        *dest = '\0';
                        break;
                }
        }

        return ret;
}
71
 
72
#define __HAVE_ARCH_STRCHR
/*
 * Locate the first occurrence of c (converted to char) in s.  The
 * terminating NUL counts as part of the string, so searching for
 * '\0' returns a pointer to the terminator.  Returns NULL when c
 * does not occur in s.
 */
static inline char * strchr(const char * s, int c)
{
  const char target = c;

  while (*s != target) {
    if (*s == '\0')
      return NULL;
    s++;
  }
  return (char *) s;
}
82
 
83
#if 0
84
#define __HAVE_ARCH_STRPBRK
85
static inline char *strpbrk(const char *cs,const char *ct)
86
{
87
  const char *sc1,*sc2;
88
 
89
  for( sc1 = cs; *sc1 != '\0'; ++sc1)
90
    for( sc2 = ct; *sc2 != '\0'; ++sc2)
91
      if (*sc1 == *sc2)
92
        return((char *) sc1);
93
  return( NULL );
94
}
95
#endif
96
 
97
#if 0
98
#define __HAVE_ARCH_STRSPN
99
static inline size_t strspn(const char *s, const char *accept)
100
{
101
  const char *p;
102
  const char *a;
103
  size_t count = 0;
104
 
105
  for (p = s; *p != '\0'; ++p)
106
    {
107
      for (a = accept; *a != '\0'; ++a)
108
        if (*p == *a)
109
          break;
110
      if (*a == '\0')
111
        return count;
112
      else
113
        ++count;
114
    }
115
 
116
  return count;
117
}
118
#endif
119
 
120
#if 0
121
#define __HAVE_ARCH_STRTOK
122
static inline char *strtok(char *s, const char *ct)
123
{
124
  char *sbegin, *send;
125
 
126
  sbegin  = s ? s : ___strtok;
127
  if (!sbegin) {
128
          return NULL;
129
  }
130
  sbegin += strspn(sbegin,ct);
131
  if (*sbegin == '\0') {
132
    ___strtok = NULL;
133
    return( NULL );
134
  }
135
  send = strpbrk( sbegin, ct);
136
  if (send && *send != '\0')
137
    *send++ = '\0';
138
  ___strtok = send;
139
  return (sbegin);
140
}
141
#endif
142
 
143
/* strstr !! */
144
 
145
#define __HAVE_ARCH_STRLEN
/*
 * Return the number of characters in s before the terminating NUL.
 */
static inline size_t strlen(const char * s)
{
  size_t len = 0;

  while (s[len] != '\0')
    len++;
  return len;
}
152
 
153
/* strnlen !! */
154
 
155
#define __HAVE_ARCH_STRCMP
/*
 * Compare the strings cs and ct.  Returns 0 when they are equal,
 * otherwise the difference of the first differing byte pair
 * (*cs - *ct) computed in an 8-bit register.
 *
 * NOTE(review): the result is held in a plain char, so for bytes
 * with the high bit set the sign can differ from the ISO C
 * unsigned-char comparison — confirm callers only rely on
 * <0 / 0 / >0 for 7-bit data or plain equality.
 */
static inline int strcmp(const char * cs,const char * ct)
{
  char __res;

  __asm__
       ("1:\tmoveb %0@+,%2\n\t" /* get *cs */
        "cmpb %1@+,%2\n\t"      /* compare a byte */
        "jne  2f\n\t"           /* not equal, break out */
        "tstb %2\n\t"           /* at end of cs? */
        "jne  1b\n\t"           /* no, keep going */
        "jra  3f\n\t"           /* strings are equal */
        "2:\tsubb %1@-,%2\n\t"  /* *cs - *ct */
        "3:"
        : "=a" (cs), "=a" (ct), "=d" (__res)
        : "0" (cs), "1" (ct));
  return __res;
}
173
 
174
#define __HAVE_ARCH_STRNCMP
/*
 * Compare at most count characters of cs and ct.  Returns 0 when the
 * prefixes are equal (or count is 0), otherwise the difference of
 * the first differing byte pair (*cs - *ct) in an 8-bit register.
 *
 * NOTE(review): like strcmp() above, the char result means the sign
 * for high-bit bytes differs from the ISO unsigned-char comparison.
 */
static inline int strncmp(const char * cs,const char * ct,size_t count)
{
  char __res;

  if (!count)           /* zero-length compare is always equal */
    return 0;
  __asm__
       ("1:\tmovb %0@+,%3\n\t"          /* get *cs */
        "cmpb   %1@+,%3\n\t"            /* compare a byte */
        "jne    3f\n\t"                 /* not equal, break out */
        "tstb   %3\n\t"                 /* at end of cs? */
        "jeq    4f\n\t"                 /* yes, all done */
        "subql  #1,%2\n\t"              /* no, adjust count */
        "jne    1b\n\t"                 /* more to do, keep going */
        "2:\tmoveq #0,%3\n\t"           /* strings are equal */
        "jra    4f\n\t"
        "3:\tsubb %1@-,%3\n\t"          /* *cs - *ct */
        "4:"
        : "=a" (cs), "=a" (ct), "=d" (count), "=d" (__res)
        : "0" (cs), "1" (ct), "2" (count));
  return __res;
}
197
 
198
#define __HAVE_ARCH_MEMSET
/*
 * This is really ugly, but its highly optimizatiable by the
 * compiler and is meant as compensation for gcc's missing
 * __builtin_memset(). For the 680[23]0 it might be worth considering
 * the optimal number of misaligned writes compared to the number of
 * tests'n'branches needed to align the destination address. The
 * 680[46]0 doesn't really care due to their copy-back caches.
 *                                              10/09/96 - Jes Sorensen
 */
/*
 * Generic memset: fill the first count bytes at s with the byte
 * value c and return s.  Small fills (count < 36) use an unrolled
 * fall-through switch of longword stores; larger fills align the
 * destination first and store longwords in a loop.
 */
static inline void * __memset_g(void * s, int c, size_t count)
{
  void *xs = s;         /* saved for the return value */
  size_t temp;

  if (!count)
    return xs;

  /* replicate the fill byte into every byte of a 32-bit word */
  c &= 0xff;
  c |= c << 8;
  c |= c << 16;

  if (count < 36){
          long *ls = s;

          /* each case group stores one longword and deliberately
           * falls through to the next, so count/4 longwords are
           * written in total */
          switch(count){
          case 32: case 33: case 34: case 35:
                  *ls++ = c;
                  /* fall through */
          case 28: case 29: case 30: case 31:
                  *ls++ = c;
                  /* fall through */
          case 24: case 25: case 26: case 27:
                  *ls++ = c;
                  /* fall through */
          case 20: case 21: case 22: case 23:
                  *ls++ = c;
                  /* fall through */
          case 16: case 17: case 18: case 19:
                  *ls++ = c;
                  /* fall through */
          case 12: case 13: case 14: case 15:
                  *ls++ = c;
                  /* fall through */
          case 8: case 9: case 10: case 11:
                  *ls++ = c;
                  /* fall through */
          case 4: case 5: case 6: case 7:
                  *ls++ = c;
                  break;
          default:
                  break;
          }
          s = ls;
          /* store the remaining 0-3 bytes: one word, then one byte */
          if (count & 0x02){
                  short *ss = s;
                  *ss++ = c;
                  s = ss;
          }
          if (count & 0x01){
                  char *cs = s;
                  *cs++ = c;
                  s = cs;
          }
          return xs;
          /* NOTE(review): this small-count path stores longwords
           * without aligning s first — fine on 68020+ which tolerate
           * misaligned accesses, but confirm for other targets. */
  }

  /* large fill: align the destination to a longword boundary */
  if ((long) s & 1)
    {
      char *cs = s;
      *cs++ = c;
      s = cs;
      count--;
    }
  if (count > 2 && (long) s & 2)
    {
      short *ss = s;
      *ss++ = c;
      s = ss;
      count -= 2;
    }
  /* bulk of the fill, one longword at a time */
  temp = count >> 2;
  if (temp)
    {
      long *ls = s;
      temp--;
      do
        *ls++ = c;
      while (temp--);
      s = ls;
    }
  /* trailing word and byte */
  if (count & 2)
    {
      short *ss = s;
      *ss++ = c;
      s = ss;
    }
  if (count & 1)
    {
      char *cs = s;
      *cs = c;
    }
  return xs;
}
295
 
296
/*
 * __memset_page assumes that data is longword aligned. Most, if not
 * all, of these page sized memsets are performed on page aligned
 * areas, thus we do not need to check if the destination is longword
 * aligned. Of course we suffer a serious performance loss if this is
 * not the case but I think the risk of this ever happening is
 * extremely small. We spend a lot of time clearing pages in
 * get_empty_page() so I think it is worth it anyway. Besides, the
 * 680[46]0 do not really care about misaligned writes due to their
 * copy-back cache.
 *
 * The optimized case for the 680[46]0 is implemented using the move16
 * instruction. My tests showed that this implementation is 35-45%
 * faster than the original implementation using movel, the only
 * caveat is that the destination address must be 16-byte aligned.
 *                                            01/09/96 - Jes Sorensen
 */
/*
 * Page-sized memset used by the memset() macro below when
 * count == PAGE_SIZE.  Fills count bytes at s with the byte c and
 * returns s.  count is assumed to be a multiple of 32 (the unrolled
 * loops process 32 bytes per iteration).
 */
static inline void * __memset_page(void * s,int c,size_t count)
{
  unsigned long data, tmp;
  void *xs, *sp;        /* xs = return value, sp = start of the region */

  xs = sp = s;

  /* replicate the fill byte into all four bytes of a longword */
  c = c & 255;
  data = c | (c << 8);
  data |= data << 16;

#ifdef CPU_M68040_OR_M68060_ONLY

  /* move16 requires 16-byte alignment; fall back otherwise */
  if (((unsigned long) s) & 0x0f)
          __memset_g(s, c, count);
  else{
          /* seed the first 16 bytes; the move16 loop then replicates
           * this block through the rest of the page by copying from
           * sp.
           * NOTE(review): *((unsigned long *)(s))++ is the obsolete
           * GCC "cast as lvalue" extension (removed in GCC 4) —
           * confirm the toolchain still accepts it. */
          *((unsigned long *)(s))++ = data;
          *((unsigned long *)(s))++ = data;
          *((unsigned long *)(s))++ = data;
          *((unsigned long *)(s))++ = data;

          __asm__ __volatile__("1:\t"
                               ".chip 68040\n\t"
                               "move16 %2@+,%0@+\n\t"   /* copy 16 bytes from sp */
                               ".chip 68k\n\t"
                               "subqw  #8,%2\n\t"       /* rewind sp by 16 so the */
                               "subqw  #8,%2\n\t"       /* same block is re-read  */
                               "dbra   %1,1b\n\t"       /* (count-16)/16 iterations */
                               : "=a" (s), "=d" (tmp)
                               : "a" (sp), "0" (s), "1" ((count - 16) / 16 - 1)
                               );
  }

#else
  /* generic path: 8 longword stores per iteration = 32 bytes */
  __asm__ __volatile__("1:\t"
                       "movel %2,%0@+\n\t"
                       "movel %2,%0@+\n\t"
                       "movel %2,%0@+\n\t"
                       "movel %2,%0@+\n\t"
                       "movel %2,%0@+\n\t"
                       "movel %2,%0@+\n\t"
                       "movel %2,%0@+\n\t"
                       "movel %2,%0@+\n\t"
                       "dbra  %1,1b\n\t"
                       : "=a" (s), "=d" (tmp)
                       : "d" (data), "0" (s), "1" (count / 32 - 1)
                       );
#endif

  return xs;
}
364
 
365
/* Out-of-line memset prototype; the macro below shadows the name for
 * direct calls in this header's users. */
extern void *memset(void *,int,__kernel_size_t);

/* Constant-size dispatch: whole-page fills go to the optimized
 * __memset_page(), everything else to the generic __memset_g(). */
#define __memset_const(s,c,count) \
((count==PAGE_SIZE) ? \
  __memset_page((s),(c),(count)) : \
  __memset_g((s),(c),(count)))

/* memset(): use the constant-size path when the compiler knows
 * `count` at compile time, otherwise the generic inline version. */
#define memset(s, c, count) \
(__builtin_constant_p(count) ? \
 __memset_const((s),(c),(count)) : \
 __memset_g((s),(c),(count)))
376
 
377
#define __HAVE_ARCH_MEMCPY
/* Out-of-line memcpy used for variable-length copies (see the
 * memcpy() macro further below). */
extern void * memcpy(void *, const void *, size_t );
/*
 * __builtin_memcpy() does not handle page-sized memcpys very well,
 * thus following the same assumptions as for page-sized memsets, this
 * function copies page-sized areas using an unrolled loop, without
 * considering alignment.
 *
 * For the 680[46]0 only kernels we use the move16 instruction instead
 * as it writes through the data-cache, invalidating the cache-lines
 * touched. In this way we do not use up the entire data-cache (well,
 * half of it on the 68060) by copying a page. An unrolled loop of two
 * move16 instructions seem to the fastest. The only caveat is that
 * both source and destination must be 16-byte aligned, if not we fall
 * back to the generic memcpy function.  - Jes
 */
393
/*
 * Page-sized memcpy used by the memcpy() macro below when
 * n == PAGE_SIZE.  Copies count bytes from `from` to `to` and
 * returns to.  count is assumed to be a multiple of 32 (each loop
 * iteration moves 32 bytes); the regions must not overlap.
 */
static inline void * __memcpy_page(void * to, const void * from, size_t count)
{
  unsigned long tmp;    /* dbra loop counter */
  void *xto = to;       /* saved return value */

#ifdef CPU_M68040_OR_M68060_ONLY

  /* move16 needs both addresses 16-byte aligned; otherwise fall back */
  if (((unsigned long) to | (unsigned long) from) & 0x0f)
          return memcpy(to, from, count);

  /* two move16s per iteration = 32 bytes, writing through the cache */
  __asm__ __volatile__("1:\t"
                       ".chip 68040\n\t"
                       "move16 %1@+,%0@+\n\t"
                       "move16 %1@+,%0@+\n\t"
                       ".chip 68k\n\t"
                       "dbra  %2,1b\n\t"
                       : "=a" (to), "=a" (from), "=d" (tmp)
                       : "0" (to), "1" (from) , "2" (count / 32 - 1)
                       );
#else
  /* generic path: 8 longword moves per iteration = 32 bytes */
  __asm__ __volatile__("1:\t"
                       "movel %1@+,%0@+\n\t"
                       "movel %1@+,%0@+\n\t"
                       "movel %1@+,%0@+\n\t"
                       "movel %1@+,%0@+\n\t"
                       "movel %1@+,%0@+\n\t"
                       "movel %1@+,%0@+\n\t"
                       "movel %1@+,%0@+\n\t"
                       "movel %1@+,%0@+\n\t"
                       "dbra  %2,1b\n\t"
                       : "=a" (to), "=a" (from), "=d" (tmp)
                       : "0" (to), "1" (from) , "2" (count / 32 - 1)
                       );
#endif
  return xto;
}
429
 
430
/* Constant-size dispatch: whole-page copies go to __memcpy_page(),
 * everything else to gcc's builtin. */
#define __memcpy_const(to, from, n) \
((n==PAGE_SIZE) ? \
  __memcpy_page((to),(from),(n)) : \
  __builtin_memcpy((to),(from),(n)))

/* memcpy(): constant sizes take the path above; variable sizes call
 * the out-of-line memcpy (the inner `memcpy` is not expanded again
 * because a function-like macro does not recurse). */
#define memcpy(to, from, n) \
(__builtin_constant_p(n) ? \
 __memcpy_const((to),(from),(n)) : \
 memcpy((to),(from),(n)))
439
 
440
#define __HAVE_ARCH_MEMMOVE
/*
 * Copy n bytes from src to dest, correctly handling overlapping
 * regions: the copy runs forwards when dest is below src and
 * backwards otherwise, so source bytes are always read before they
 * are overwritten.  Returns dest.
 *
 * NOTE(review): only dest is aligned before the longword loop, so
 * src reads may be misaligned — acceptable on 68020+ which tolerate
 * misaligned accesses; confirm for other targets.
 */
static inline void * memmove(void * dest,const void * src, size_t n)
{
  void *xdest = dest;   /* saved return value */
  size_t temp;

  if (!n)
    return xdest;

  if (dest < src)
    {
      /* forward copy: align dest, copy longwords, then the tail */
      if ((long) dest & 1)
        {
          char *cdest = dest;
          const char *csrc = src;
          *cdest++ = *csrc++;
          dest = cdest;
          src = csrc;
          n--;
        }
      if (n > 2 && (long) dest & 2)
        {
          short *sdest = dest;
          const short *ssrc = src;
          *sdest++ = *ssrc++;
          dest = sdest;
          src = ssrc;
          n -= 2;
        }
      temp = n >> 2;    /* number of whole longwords */
      if (temp)
        {
          long *ldest = dest;
          const long *lsrc = src;
          temp--;
          do
            *ldest++ = *lsrc++;
          while (temp--);
          dest = ldest;
          src = lsrc;
        }
      /* trailing word and byte */
      if (n & 2)
        {
          short *sdest = dest;
          const short *ssrc = src;
          *sdest++ = *ssrc++;
          dest = sdest;
          src = ssrc;
        }
      if (n & 1)
        {
          char *cdest = dest;
          const char *csrc = src;
          *cdest = *csrc;
        }
    }
  else
    {
      /* backward copy: start one past the end and pre-decrement */
      dest = (char *) dest + n;
      src = (const char *) src + n;
      if ((long) dest & 1)
        {
          char *cdest = dest;
          const char *csrc = src;
          *--cdest = *--csrc;
          dest = cdest;
          src = csrc;
          n--;
        }
      if (n > 2 && (long) dest & 2)
        {
          short *sdest = dest;
          const short *ssrc = src;
          *--sdest = *--ssrc;
          dest = sdest;
          src = ssrc;
          n -= 2;
        }
      temp = n >> 2;    /* number of whole longwords */
      if (temp)
        {
          long *ldest = dest;
          const long *lsrc = src;
          temp--;
          do
            *--ldest = *--lsrc;
          while (temp--);
          dest = ldest;
          src = lsrc;
        }
      /* trailing word and byte */
      if (n & 2)
        {
          short *sdest = dest;
          const short *ssrc = src;
          *--sdest = *--ssrc;
          dest = sdest;
          src = ssrc;
        }
      if (n & 1)
        {
          char *cdest = dest;
          const char *csrc = src;
          *--cdest = *--csrc;
        }
    }
  return xdest;
}
547
 
548
#define __HAVE_ARCH_MEMCMP
/* Out-of-line memcmp for the variable-length case. */
extern int memcmp(const void * ,const void * ,size_t );
/* Use gcc's builtin when the length is a compile-time constant,
 * otherwise call the out-of-line routine (the inner `memcmp` is not
 * expanded again). */
#define memcmp(cs, ct, n) \
(__builtin_constant_p(n) ? \
 __builtin_memcmp((cs),(ct),(n)) : \
 memcmp((cs),(ct),(n)))
554
 
555
#define __HAVE_ARCH_MEMCHR
/*
 * Find the first byte equal to (unsigned char)c within the first
 * count bytes of cs.  Returns a pointer to the matching byte, or
 * NULL if it does not occur.
 *
 * Fix: ISO C requires c to be converted to unsigned char before the
 * comparison; comparing the raw int against a byte made lookups for
 * negative c values (e.g. -1 vs. a 0xff byte) fail.
 */
static inline void *memchr(const void *cs, int c, size_t count)
{
        /* Someone else can optimize this, I don't care - tonym@mac.linux-m68k.org */
        const unsigned char uc = (unsigned char)c;
        unsigned char *ret = (unsigned char *)cs;

        for(;count>0;count--,ret++)
                if(*ret == uc) return ret;

        return NULL;
}
565
 
566
#endif /* _M68K_STRING_H_ */

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.