openrisc/trunk/gnu-src/gcc-4.5.1/gcc/config/i386/fma4intrin.h (Subversion repository openrisc_me, rev 282)
/* Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _X86INTRIN_H_INCLUDED
# error "Never use <fma4intrin.h> directly; include <x86intrin.h> instead."
#endif

#ifndef _FMA4INTRIN_H_INCLUDED
#define _FMA4INTRIN_H_INCLUDED

#ifndef __FMA4__
# error "FMA4 instruction set not enabled"
#else

/* We need definitions from the SSE4A, SSE3, SSE2 and SSE header files.  */
#include <ammintrin.h>

/* 128b Floating point multiply/add type instructions.  */
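/* Per-element semantics of the wrappers below (following the FMA4
   multiply-add builtins they expand to):
     _mm_macc_*     (A * B) + C
     _mm_msub_*     (A * B) - C
     _mm_nmacc_*    -(A * B) + C
     _mm_nmsub_*    -(A * B) - C
     _mm_maddsub_*  subtracts C in even-indexed elements and adds it in
                    odd-indexed ones; _mm_msubadd_* does the reverse.
   The _ss/_sd variants operate on the lowest element only.  */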
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_macc_ps (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfmaddps ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_macc_pd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfmaddpd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_macc_ss (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfmaddss ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_macc_sd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfmaddsd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_msub_ps (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfmsubps ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_msub_pd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfmsubpd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_msub_ss (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfmsubss ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_msub_sd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfmsubsd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_nmacc_ps (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfnmaddps ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_nmacc_pd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfnmaddpd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_nmacc_ss (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfnmaddss ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_nmacc_sd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfnmaddsd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_nmsub_ps (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfnmsubps ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_nmsub_pd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfnmsubpd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_nmsub_ss (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfnmsubss ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_nmsub_sd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfnmsubsd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maddsub_ps (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfmaddsubps ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maddsub_pd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfmaddsubpd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_msubadd_ps (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfmsubaddps ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_msubadd_pd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfmsubaddpd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

/* 256b Floating point multiply/add type instructions.  */
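/* The 256-bit forms mirror the packed 128-bit ones above, operating on
   __m256 (eight floats) and __m256d (four doubles); FMA4 defines no
   256-bit scalar variants.  */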
extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_macc_ps (__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256) __builtin_ia32_vfmaddps256 ((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}

extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_macc_pd (__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d) __builtin_ia32_vfmaddpd256 ((__v4df)__A, (__v4df)__B, (__v4df)__C);
}

extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_msub_ps (__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256) __builtin_ia32_vfmsubps256 ((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}

extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_msub_pd (__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d) __builtin_ia32_vfmsubpd256 ((__v4df)__A, (__v4df)__B, (__v4df)__C);
}

extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_nmacc_ps (__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256) __builtin_ia32_vfnmaddps256 ((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}

extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_nmacc_pd (__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d) __builtin_ia32_vfnmaddpd256 ((__v4df)__A, (__v4df)__B, (__v4df)__C);
}

extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_nmsub_ps (__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256) __builtin_ia32_vfnmsubps256 ((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}

extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_nmsub_pd (__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d) __builtin_ia32_vfnmsubpd256 ((__v4df)__A, (__v4df)__B, (__v4df)__C);
}

extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maddsub_ps (__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256) __builtin_ia32_vfmaddsubps256 ((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}

extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maddsub_pd (__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d) __builtin_ia32_vfmaddsubpd256 ((__v4df)__A, (__v4df)__B, (__v4df)__C);
}

extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_msubadd_ps (__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256) __builtin_ia32_vfmsubaddps256 ((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}

extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_msubadd_pd (__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d) __builtin_ia32_vfmsubaddpd256 ((__v4df)__A, (__v4df)__B, (__v4df)__C);
}

#endif

#endif
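A minimal usage sketch, not part of the original header: it assumes GCC on an FMA4-capable CPU, includes <x86intrin.h> as the guard at the top of the file requires, and is compiled with -mfma4 so that __FMA4__ is defined. The file name fma4_demo.c is hypothetical.

#include <stdio.h>
#include <x86intrin.h>

/* fma4_demo.c: fused multiply-add of four floats at once,
   r[i] = a[i]*b[i] + c[i], via _mm_macc_ps.
   Build: gcc -mfma4 fma4_demo.c -o fma4_demo  */
int main (void)
{
  /* _mm_set_ps lists elements from highest to lowest index.  */
  __m128 a = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);
  __m128 b = _mm_set_ps (8.0f, 7.0f, 6.0f, 5.0f);
  __m128 c = _mm_set_ps (0.5f, 0.5f, 0.5f, 0.5f);

  __m128 r = _mm_macc_ps (a, b, c);

  float out[4];
  _mm_storeu_ps (out, r);
  printf ("%f %f %f %f\n", out[0], out[1], out[2], out[3]);
  /* Expected: 5.500000 12.500000 21.500000 32.500000 */
  return 0;
}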
