# libgcc/config/i386/morestack.S — x86/x86_64 -fsplit-stack runtime support.
# (Recovered from a WebSVN blame-view scrape of the OpenCores openrisc
# mirror, trunk rev 734; navigation boilerplate removed.)
# x86/x86_64 support for -fsplit-stack.
# Copyright (C) 2009, 2010, 2011 Free Software Foundation, Inc.
# Contributed by Ian Lance Taylor <iant@google.com>.

# This file is part of GCC.

# GCC is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.

# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# for more details.

# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.

# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
# <http://www.gnu.org/licenses/>.


# Support for allocating more stack space when using -fsplit-stack.
# When a function discovers that it needs more stack space, it will
# call __morestack with the size of the stack frame and the size of
# the parameters to copy from the old stack frame to the new one.
# The __morestack function preserves the parameter registers and
# calls __generic_morestack to actually allocate the stack space.

# When this is called stack space is very low, but we ensure that
# there is enough space to push the parameter registers and to call
# __generic_morestack.

# When calling __generic_morestack, FRAME_SIZE points to the size of
# the desired frame when the function is called, and the function
# sets it to the size of the allocated stack.  OLD_STACK points to
# the parameters on the old stack and PARAM_SIZE is the number of
# bytes of parameters to copy to the new stack.  These are the
# parameters of the function that called __morestack.  The
# __generic_morestack function returns the new stack pointer,
# pointing to the address of the first copied parameter.  The return
# value minus the returned *FRAME_SIZE will be the first address on
# the stack which we should not use.

# void *__generic_morestack (size_t *frame_size, void *old_stack,
#                            size_t param_size);

# The __morestack routine has to arrange for the caller to return to a
# stub on the new stack.  The stub is responsible for restoring the
# old stack pointer and returning to the caller's caller.  This calls
# __generic_releasestack to retrieve the old stack pointer and release
# the newly allocated stack.

# void *__generic_releasestack (size_t *available);

# We do a little dance so that the processor's call/return return
# address prediction works out.  The compiler arranges for the caller
# to look like this:
#   call __generic_morestack
#   ret
#  L:
#   // carry on with function
# After we allocate more stack, we call L, which is in our caller.
# When that returns (to the predicted instruction), we release the
# stack segment and reset the stack pointer.  We then return to the
# predicted instruction, namely the ret instruction immediately after
# the call to __generic_morestack.  That then returns to the caller of
# the original caller.


# The amount of extra space we ask for.  In general this has to be
# enough for the dynamic loader to find a symbol and for a signal
# handler to run.

# Extra slack, in bytes, requested beyond the caller's frame size.
#ifndef __x86_64__
#define BACKOFF (1024)
#else
#define BACKOFF (1536)
#endif


# This entry point is for split-stack code which calls non-split-stack
# code.  When the linker sees this case, it converts the call to
# __morestack to call __morestack_non_split instead.  We just bump the
# requested stack space by 16K.

        .global __morestack_non_split
        .hidden __morestack_non_split

#ifdef __ELF__
        .type   __morestack_non_split,@function
#endif

__morestack_non_split:
        .cfi_startproc

#ifndef __x86_64__

        # See below for an extended explanation of this.
        .cfi_def_cfa %esp,16

        pushl   %eax                    # Save %eax in case it is a parameter.

        .cfi_adjust_cfa_offset 4        # Account for pushed register.

        movl    %esp,%eax               # Current stack,
        subl    8(%esp),%eax            # less required stack frame size,
        subl    $0x4000,%eax            # less space for non-split code.
        cmpl    %gs:0x30,%eax           # See if we have enough space.
        jb      2f                      # Get more space if we need it.

        # Here the stack is
        #       %esp + 20:      stack pointer after two returns
        #       %esp + 16:      return address of morestack caller's caller
        #       %esp + 12:      size of parameters
        #       %esp + 8:       new stack frame size
        #       %esp + 4:       return address of this function
        #       %esp:           saved %eax
        #
        # Since we aren't doing a full split stack, we don't need to
        # do anything when our caller returns.  So we return to our
        # caller rather than calling it, and let it return as usual.
        # To make that work we adjust the return address.

        # This breaks call/return address prediction for the call to
        # this function.  I can't figure out a way to make it work
        # short of copying the parameters down the stack, which will
        # probably take more clock cycles than we will lose breaking
        # call/return address prediction.  We will only break
        # prediction for this call, not for our caller.

        movl    4(%esp),%eax            # Increment the return address
        cmpb    $0xc3,(%eax)            # to skip the ret instruction;
        je      1f                      # see above.
        addl    $2,%eax
1:      inc     %eax
        movl    %eax,4(%esp)            # Update return address.

        popl    %eax                    # Restore %eax and stack.

        .cfi_adjust_cfa_offset -4       # Account for popped register.

        ret     $8                      # Return to caller, popping args.

2:
        .cfi_adjust_cfa_offset 4        # Back to where we were.

        popl    %eax                    # Restore %eax and stack.

        .cfi_adjust_cfa_offset -4       # Account for popped register.

        addl    $0x5000+BACKOFF,4(%esp) # Increment space we request.

        # Fall through into morestack.

#else

        # See below for an extended explanation of this.
        .cfi_def_cfa %rsp,16

        pushq   %rax                    # Save %rax in case caller is using
                                        # it to preserve original %r10.
        .cfi_adjust_cfa_offset 8        # Adjust for pushed register.

        movq    %rsp,%rax               # Current stack,
        subq    %r10,%rax               # less required stack frame size,
        subq    $0x4000,%rax            # less space for non-split code.

#ifdef __LP64__
        cmpq    %fs:0x70,%rax           # See if we have enough space.
#else
        cmpl    %fs:0x40,%eax
#endif
        popq    %rax                    # Restore register.

        .cfi_adjust_cfa_offset -8       # Adjust for popped register.

        jb      2f                      # Get more space if we need it.

        # This breaks call/return prediction, as described above.
        incq    (%rsp)                  # Increment the return address.

        ret                             # Return to caller.

2:
        addq    $0x5000+BACKOFF,%r10    # Increment space we request.

        # Fall through into morestack.

#endif

        .cfi_endproc
#ifdef __ELF__
        .size   __morestack_non_split, . - __morestack_non_split
#endif

# __morestack_non_split falls through into __morestack.


# The __morestack function.

        .global __morestack
        .hidden __morestack

#ifdef __ELF__
        .type   __morestack,@function
#endif

__morestack:
.LFB1:
        .cfi_startproc


#ifndef __x86_64__


# The 32-bit __morestack function.

        # We use a cleanup to restore the stack guard if an exception
        # is thrown through this code.
#ifndef __PIC__
        .cfi_personality 0,__gcc_personality_v0
        .cfi_lsda 0,.LLSDA1
#else
        .cfi_personality 0x9b,DW.ref.__gcc_personality_v0
        .cfi_lsda 0x1b,.LLSDA1
#endif

        # We return below with a ret $8.  We will return to a single
        # return instruction, which will return to the caller of our
        # caller.  We let the unwinder skip that single return
        # instruction, and just return to the real caller.

        # Here CFA points just past the return address on the stack,
        # e.g., on function entry it is %esp + 4.  The stack looks
        # like this:
        #       CFA + 12:       stack pointer after two returns
        #       CFA + 8:        return address of morestack caller's caller
        #       CFA + 4:        size of parameters
        #       CFA:            new stack frame size
        #       CFA - 4:        return address of this function
        #       CFA - 8:        previous value of %ebp; %ebp points here
        # Setting the new CFA to be the current CFA + 12 (i.e., %esp +
        # 16) will make the unwinder pick up the right return address.

        .cfi_def_cfa %esp,16

        pushl   %ebp
        .cfi_adjust_cfa_offset 4
        .cfi_offset %ebp, -20
        movl    %esp,%ebp
        .cfi_def_cfa_register %ebp

        # In 32-bit mode the parameters are pushed on the stack.  The
        # argument size is pushed then the new stack frame size is
        # pushed.

        # Align stack to 16-byte boundary with enough space for saving
        # registers and passing parameters to functions we call.
        subl    $40,%esp

        # Because our cleanup code may need to clobber %ebx, we need
        # to save it here so the unwinder can restore the value used
        # by the caller.  Note that we don't have to restore the
        # register, since we don't change it, we just have to save it
        # for the unwinder.
        movl    %ebx,-4(%ebp)
        .cfi_offset %ebx, -24

        # In 32-bit mode the registers %eax, %edx, and %ecx may be
        # used for parameters, depending on the regparm and fastcall
        # attributes.

        movl    %eax,-8(%ebp)
        movl    %edx,-12(%ebp)
        movl    %ecx,-16(%ebp)

        call    __morestack_block_signals

        movl    12(%ebp),%eax           # The size of the parameters.
        movl    %eax,8(%esp)
        leal    20(%ebp),%eax           # Address of caller's parameters.
        movl    %eax,4(%esp)
        addl    $BACKOFF,8(%ebp)        # Ask for backoff bytes.
        leal    8(%ebp),%eax            # The address of the new frame size.
        movl    %eax,(%esp)

        call    __generic_morestack

        movl    %eax,%esp               # Switch to the new stack.
        subl    8(%ebp),%eax            # The end of the stack space.
        addl    $BACKOFF,%eax           # Back off BACKOFF bytes.

.LEHB0:
        # FIXME: The offset must match
        # TARGET_THREAD_SPLIT_STACK_OFFSET in
        # gcc/config/i386/linux.h.
        movl    %eax,%gs:0x30           # Save the new stack boundary.

        call    __morestack_unblock_signals

        movl    -12(%ebp),%edx          # Restore registers.
        movl    -16(%ebp),%ecx

        movl    4(%ebp),%eax            # Increment the return address
        cmpb    $0xc3,(%eax)            # to skip the ret instruction;
        je      1f                      # see above.
        addl    $2,%eax
1:      inc     %eax

        movl    %eax,-12(%ebp)          # Store return address in an
                                        # unused slot.

        movl    -8(%ebp),%eax           # Restore the last register.

        call    *-12(%ebp)              # Call our caller!

        # The caller will return here, as predicted.

        # Save the registers which may hold a return value.  We
        # assume that __generic_releasestack does not touch any
        # floating point or vector registers.
        pushl   %eax
        pushl   %edx

        # Push the arguments to __generic_releasestack now so that the
        # stack is at a 16-byte boundary for
        # __morestack_block_signals.
        pushl   $0                      # Where the available space is returned.
        leal    0(%esp),%eax            # Push its address.
        push    %eax

        call    __morestack_block_signals

        call    __generic_releasestack

        subl    4(%esp),%eax            # Subtract available space.
        addl    $BACKOFF,%eax           # Back off BACKOFF bytes.
.LEHE0:
        movl    %eax,%gs:0x30           # Save the new stack boundary.

        addl    $8,%esp                 # Remove values from stack.

        # We need to restore the old stack pointer, which is in %rbp,
        # before we unblock signals.  We also need to restore %eax and
        # %edx after we unblock signals but before we return.  Do this
        # by moving %eax and %edx from the current stack to the old
        # stack.

        popl    %edx                    # Pop return value from current stack.
        popl    %eax

        movl    %ebp,%esp               # Restore stack pointer.

        pushl   %eax                    # Push return value on old stack.
        pushl   %edx
        subl    $8,%esp                 # Align stack to 16-byte boundary.

        call    __morestack_unblock_signals

        addl    $8,%esp
        popl    %edx                    # Restore return value.
        popl    %eax

        .cfi_remember_state

        # We never changed %ebx, so we don't have to actually restore it.
        .cfi_restore %ebx

        popl    %ebp
        .cfi_restore %ebp
        .cfi_def_cfa %esp, 16
        ret     $8                      # Return to caller, which will
                                        # immediately return.  Pop
                                        # arguments as we go.

# This is the cleanup code called by the stack unwinder when unwinding
# through the code between .LEHB0 and .LEHE0 above.

.L1:
        .cfi_restore_state
        subl    $16,%esp                # Maintain 16 byte alignment.
        movl    %eax,4(%esp)            # Save exception header.
        movl    %ebp,(%esp)             # Stack pointer after resume.
        call    __generic_findstack
        movl    %ebp,%ecx               # Get the stack pointer.
        subl    %eax,%ecx               # Subtract available space.
        addl    $BACKOFF,%ecx           # Back off BACKOFF bytes.
        movl    %ecx,%gs:0x30           # Save new stack boundary.
        movl    4(%esp),%eax            # Function argument.
        movl    %eax,(%esp)
#ifdef __PIC__
        call    __x86.get_pc_thunk.bx   # %ebx may not be set up for us.
        addl    $_GLOBAL_OFFSET_TABLE_, %ebx
        call    _Unwind_Resume@PLT      # Resume unwinding.
#else
        call    _Unwind_Resume
#endif

#else /* defined(__x86_64__) */


# The 64-bit __morestack function.

        # We use a cleanup to restore the stack guard if an exception
        # is thrown through this code.
#ifndef __PIC__
        .cfi_personality 0x3,__gcc_personality_v0
        .cfi_lsda 0x3,.LLSDA1
#else
        .cfi_personality 0x9b,DW.ref.__gcc_personality_v0
        .cfi_lsda 0x1b,.LLSDA1
#endif

        # We will return a single return instruction, which will
        # return to the caller of our caller.  Let the unwinder skip
        # that single return instruction, and just return to the real
        # caller.
        .cfi_def_cfa %rsp,16

        # Set up a normal backtrace.
        pushq   %rbp
        .cfi_adjust_cfa_offset 8
        .cfi_offset %rbp, -24
        movq    %rsp, %rbp
        .cfi_def_cfa_register %rbp

        # In 64-bit mode the new stack frame size is passed in r10
        # and the argument size is passed in r11.

        addq    $BACKOFF,%r10           # Ask for backoff bytes.
        pushq   %r10                    # Save new frame size.

        # In 64-bit mode the registers %rdi, %rsi, %rdx, %rcx, %r8,
        # and %r9 may be used for parameters.  We also preserve %rax
        # which the caller may use to hold %r10.

        pushq   %rax
        pushq   %rdi
        pushq   %rsi
        pushq   %rdx
        pushq   %rcx
        pushq   %r8
        pushq   %r9

        pushq   %r11
        pushq   $0                      # For alignment.

        call    __morestack_block_signals

        leaq    -8(%rbp),%rdi           # Address of new frame size.
        leaq    24(%rbp),%rsi           # The caller's parameters.
        addq    $8,%rsp
        popq    %rdx                    # The size of the parameters.

        call    __generic_morestack

        movq    -8(%rbp),%r10           # Reload modified frame size
        movq    %rax,%rsp               # Switch to the new stack.
        subq    %r10,%rax               # The end of the stack space.
        addq    $BACKOFF,%rax           # Back off BACKOFF bytes.

.LEHB0:
        # FIXME: The offset must match
        # TARGET_THREAD_SPLIT_STACK_OFFSET in
        # gcc/config/i386/linux64.h.
        # Macro to save the new stack boundary.
#ifdef __LP64__
#define X86_64_SAVE_NEW_STACK_BOUNDARY(reg)     movq    %r##reg,%fs:0x70
#else
#define X86_64_SAVE_NEW_STACK_BOUNDARY(reg)     movl    %e##reg,%fs:0x40
#endif
        X86_64_SAVE_NEW_STACK_BOUNDARY (ax)

        call    __morestack_unblock_signals

        movq    -24(%rbp),%rdi          # Restore registers.
        movq    -32(%rbp),%rsi
        movq    -40(%rbp),%rdx
        movq    -48(%rbp),%rcx
        movq    -56(%rbp),%r8
        movq    -64(%rbp),%r9

        movq    8(%rbp),%r10            # Increment the return address
        incq    %r10                    # to skip the ret instruction;
                                        # see above.

        movq    -16(%rbp),%rax          # Restore caller's %rax.

        call    *%r10                   # Call our caller!

        # The caller will return here, as predicted.

        # Save the registers which may hold a return value.  We
        # assume that __generic_releasestack does not touch any
        # floating point or vector registers.
        pushq   %rax
        pushq   %rdx

        call    __morestack_block_signals

        pushq   $0                      # For alignment.
        pushq   $0                      # Where the available space is returned.
        leaq    0(%rsp),%rdi            # Pass its address.

        call    __generic_releasestack

        subq    0(%rsp),%rax            # Subtract available space.
        addq    $BACKOFF,%rax           # Back off BACKOFF bytes.
.LEHE0:
        X86_64_SAVE_NEW_STACK_BOUNDARY (ax)

        addq    $16,%rsp                # Remove values from stack.

        # We need to restore the old stack pointer, which is in %rbp,
        # before we unblock signals.  We also need to restore %rax and
        # %rdx after we unblock signals but before we return.  Do this
        # by moving %rax and %rdx from the current stack to the old
        # stack.

        popq    %rdx                    # Pop return value from current stack.
        popq    %rax

        movq    %rbp,%rsp               # Restore stack pointer.

        pushq   %rax                    # Push return value on old stack.
        pushq   %rdx

        call    __morestack_unblock_signals

        popq    %rdx                    # Restore return value.
        popq    %rax

        .cfi_remember_state
        popq    %rbp
        .cfi_restore %rbp
        .cfi_def_cfa %rsp, 16
        ret                             # Return to caller, which will
                                        # immediately return.

# This is the cleanup code called by the stack unwinder when unwinding
# through the code between .LEHB0 and .LEHE0 above.

.L1:
        .cfi_restore_state
        subq    $16,%rsp                # Maintain 16 byte alignment.
        movq    %rax,(%rsp)             # Save exception header.
        movq    %rbp,%rdi               # Stack pointer after resume.
        call    __generic_findstack
        movq    %rbp,%rcx               # Get the stack pointer.
        subq    %rax,%rcx               # Subtract available space.
        addq    $BACKOFF,%rcx           # Back off BACKOFF bytes.
        X86_64_SAVE_NEW_STACK_BOUNDARY (cx)
        movq    (%rsp),%rdi             # Restore exception data for call.
#ifdef __PIC__
        call    _Unwind_Resume@PLT      # Resume unwinding.
#else
        call    _Unwind_Resume          # Resume unwinding.
#endif

#endif /* defined(__x86_64__) */

        .cfi_endproc
#ifdef __ELF__
        .size   __morestack, . - __morestack
#endif

#if !defined(__x86_64__) && defined(__PIC__)
# Output the thunk to get PC into bx, since we use it above.
        .section        .text.__x86.get_pc_thunk.bx,"axG",@progbits,__x86.get_pc_thunk.bx,comdat
        .globl  __x86.get_pc_thunk.bx
        .hidden __x86.get_pc_thunk.bx
#ifdef __ELF__
        .type   __x86.get_pc_thunk.bx, @function
#endif
__x86.get_pc_thunk.bx:
        .cfi_startproc
        movl    (%esp), %ebx            # Return address = address after the call.
        ret
        .cfi_endproc
#ifdef __ELF__
        .size   __x86.get_pc_thunk.bx, . - __x86.get_pc_thunk.bx
#endif
#endif

# The exception table.  This tells the personality routine to execute
# the exception handler.

        .section        .gcc_except_table,"a",@progbits
        .align  4
.LLSDA1:
        .byte   0xff    # @LPStart format (omit)
        .byte   0xff    # @TType format (omit)
        .byte   0x1     # call-site format (uleb128)
        .uleb128 .LLSDACSE1-.LLSDACSB1  # Call-site table length
.LLSDACSB1:
        .uleb128 .LEHB0-.LFB1   # region 0 start
        .uleb128 .LEHE0-.LEHB0  # length
        .uleb128 .L1-.LFB1      # landing pad
        .uleb128 0              # action
.LLSDACSE1:


        .global __gcc_personality_v0
#ifdef __PIC__
        # Build a position independent reference to the basic
        # personality function.
        .hidden DW.ref.__gcc_personality_v0
        .weak   DW.ref.__gcc_personality_v0
        .section .data.DW.ref.__gcc_personality_v0,"awG",@progbits,DW.ref.__gcc_personality_v0,comdat
        .type   DW.ref.__gcc_personality_v0, @object
DW.ref.__gcc_personality_v0:
#ifndef __LP64__
        .align 4
        .size   DW.ref.__gcc_personality_v0, 4
        .long   __gcc_personality_v0
#else
        .align 8
        .size   DW.ref.__gcc_personality_v0, 8
        .quad   __gcc_personality_v0
#endif
#endif

#if defined __x86_64__ && defined __LP64__

# This entry point is used for the large model.  With this entry point
# the upper 32 bits of %r10 hold the argument size and the lower 32
# bits hold the new stack frame size.  There doesn't seem to be a way
# to know in the assembler code that we are assembling for the large
# model, and there doesn't seem to be a large model multilib anyhow.
# If one is developed, then the non-PIC code is probably OK since we
# will probably be close to the morestack code, but the PIC code
# almost certainly needs to be changed.  FIXME.

        .text
        .global __morestack_large_model
        .hidden __morestack_large_model

#ifdef __ELF__
        .type   __morestack_large_model,@function
#endif

__morestack_large_model:

        .cfi_startproc

        movq    %r10, %r11
        andl    $0xffffffff, %r10d      # %r10 = frame size (low 32 bits).
        sarq    $32, %r11               # %r11 = argument size (high 32 bits).
        jmp     __morestack

        .cfi_endproc
#ifdef __ELF__
        .size   __morestack_large_model, . - __morestack_large_model
#endif

#endif /* __x86_64__ && __LP64__ */

# Initialize the stack test value when the program starts or when a
# new thread starts.  We don't know how large the main stack is, so we
# guess conservatively.  We might be able to use getrlimit here.

        .text
        .global __stack_split_initialize
        .hidden __stack_split_initialize

#ifdef __ELF__
        .type   __stack_split_initialize, @function
#endif

__stack_split_initialize:

#ifndef __x86_64__

        leal    -16000(%esp),%eax       # We should have at least 16K.
        movl    %eax,%gs:0x30
        pushl   $16000
        pushl   %esp
#ifdef __PIC__
        call    __generic_morestack_set_initial_sp@PLT
#else
        call    __generic_morestack_set_initial_sp
#endif
        addl    $8,%esp
        ret

#else /* defined(__x86_64__) */

        leaq    -16000(%rsp),%rax       # We should have at least 16K.
        X86_64_SAVE_NEW_STACK_BOUNDARY (ax)
        movq    %rsp,%rdi
        movq    $16000,%rsi
#ifdef __PIC__
        call    __generic_morestack_set_initial_sp@PLT
#else
        call    __generic_morestack_set_initial_sp
#endif
        ret

#endif /* defined(__x86_64__) */

#ifdef __ELF__
        .size   __stack_split_initialize, . - __stack_split_initialize
#endif

# Routines to get and set the guard, for __splitstack_getcontext,
# __splitstack_setcontext, and __splitstack_makecontext.

# void *__morestack_get_guard (void) returns the current stack guard.
        .text
        .global __morestack_get_guard
        .hidden __morestack_get_guard

#ifdef __ELF__
        .type   __morestack_get_guard,@function
#endif

__morestack_get_guard:

#ifndef __x86_64__
        movl    %gs:0x30,%eax
#else
#ifdef __LP64__
        movq    %fs:0x70,%rax
#else
        movl    %fs:0x40,%eax
#endif
#endif
        ret

#ifdef __ELF__
        .size   __morestack_get_guard, . - __morestack_get_guard
#endif

# void __morestack_set_guard (void *) sets the stack guard.
        .global __morestack_set_guard
        .hidden __morestack_set_guard

#ifdef __ELF__
        .type   __morestack_set_guard,@function
#endif

__morestack_set_guard:

#ifndef __x86_64__
        movl    4(%esp),%eax
        movl    %eax,%gs:0x30
#else
        X86_64_SAVE_NEW_STACK_BOUNDARY (di)
#endif
        ret

#ifdef __ELF__
        .size   __morestack_set_guard, . - __morestack_set_guard
#endif

# void *__morestack_make_guard (void *, size_t) returns the stack
# guard value for a stack.
        .global __morestack_make_guard
        .hidden __morestack_make_guard

#ifdef __ELF__
        .type   __morestack_make_guard,@function
#endif

__morestack_make_guard:

#ifndef __x86_64__
        movl    4(%esp),%eax            # Stack base,
        subl    8(%esp),%eax            # less stack size,
        addl    $BACKOFF,%eax           # plus BACKOFF bytes of slack.
#else
        subq    %rsi,%rdi
        addq    $BACKOFF,%rdi
        movq    %rdi,%rax
#endif
        ret

#ifdef __ELF__
        .size   __morestack_make_guard, . - __morestack_make_guard
#endif

# Make __stack_split_initialize a high priority constructor.  FIXME:
# This is ELF specific.

        .section        .ctors.65535,"aw",@progbits

#ifndef __LP64__
        .align  4
        .long   __stack_split_initialize
        .long   __morestack_load_mmap
#else
        .align  8
        .quad   __stack_split_initialize
        .quad   __morestack_load_mmap
#endif

#ifdef __ELF__
        .section        .note.GNU-stack,"",@progbits
        .section        .note.GNU-split-stack,"",@progbits
        .section        .note.GNU-no-split-stack,"",@progbits
#endif

# (WebSVN page footer removed.)