// ============================================================================
//        __
//   \\__/ o\    (C) 2020  Robert Finch, Waterloo
//    \  __ /    All rights reserved.
//     \/_//     robfinch@finitron.ca
//       ||
//
// positAddsub.sv
//  - posit number adder/subtracter
//  - parameterized width
//
//
// This source file is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This source file is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.
//
// ============================================================================

import posit::*;

module positAddsub(op, a, b, o);
input op;
input [PSTWID-1:0] a;
input [PSTWID-1:0] b;
output reg [PSTWID-1:0] o;

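// Working signals: the decomposed fields of each operand (sign, regime sign
// and magnitude, exponent, significand with hidden bit), special-case flags,
// and intermediate values.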
wire sa, sb;
reg so;
wire rop;
wire [rs:0] rgma, rgmb, rgm1, rgm2, argm1, argm2;
wire rgsa, rgsb, rgs1, rgs2;
wire [rs+es+1:0] diff;
wire [es-1:0] expa, expb, exp1, exp2;
wire [PSTWID-es-1:0] siga, sigb, sig1, sig2;
wire zera, zerb;
wire infa, infb;
wire [PSTWID-1:0] aa, bb;
wire inf = infa|infb;
wire zero = zera & zerb;

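// Decompose each operand into its posit fields, along with zero and
// infinity flags.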
positDecompose #(PSTWID) u1 (
  .i(a),
  .sgn(sa),
  .rgs(rgsa),
  .rgm(rgma),
  .exp(expa),
  .sig(siga),
  .zer(zera),
  .inf(infa)
);

positDecompose #(PSTWID) u2 (
  .i(b),
  .sgn(sb),
  .rgs(rgsb),
  .rgm(rgmb),
  .exp(expb),
  .sig(sigb),
  .zer(zerb),
  .inf(infb)
);

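// Work on the absolute values of the operand bit patterns; posits negate by
// two's complement, so magnitudes can then be ordered with a plain unsigned
// compare.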
assign aa = sa ? -a : a;
assign bb = sb ? -b : b;

wire aa_gt_bb = aa >= bb;
// Determine the op really wanted: rop = 1 means the significand magnitudes
// are subtracted rather than added.
assign rop = sa ^ sb ^ op;
// Sort operand components so that the "1" set belongs to the larger-magnitude
// operand.
assign rgs1 = aa_gt_bb ? rgsa : rgsb;
assign rgs2 = aa_gt_bb ? rgsb : rgsa;
assign rgm1 = aa_gt_bb ? rgma : rgmb;
assign rgm2 = aa_gt_bb ? rgmb : rgma;
assign exp1 = aa_gt_bb ? expa : expb;
assign exp2 = aa_gt_bb ? expb : expa;
assign sig1 = aa_gt_bb ? siga : sigb;
assign sig2 = aa_gt_bb ? sigb : siga;

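// Signed regime values: the regime contributes negatively when rgs is clear.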
assign argm1 = rgs1 ? rgm1 : -rgm1;
assign argm2 = rgs2 ? rgm2 : -rgm2;

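// Scale difference between the operands ({regime,exponent}); clamp the shift
// amount, then shift the smaller significand right to align it before the
// add/subtract.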
assign diff = {argm1,exp1} - {argm2,exp2};
wire [rs-1:0] exp_diff = (|diff[es+rs:rs]) ? {rs{1'b1}} : diff[rs-1:0];
wire [PSTWID*2-1:0] sig2s = {sig2,{PSTWID{1'b0}}} >> exp_diff;
wire [PSTWID*2-1:0] sig1s = {sig1,{PSTWID{1'b0}}};
wire [PSTWID*2:0] sig_sd = rop ? sig1s - sig2s : sig1s + sig2s;
wire [1:0] sigov = sig_sd[PSTWID*2:PSTWID*2-1];

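// Count the leading zeros of the sum/difference so it can be renormalized
// after cancellation. Narrower widths are padded with ones so the pad bits
// are never counted.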
wire [$clog2(PSTWID-1):0] lzcnt;
wire [PSTWID-1:0] sigi = {|sigov,sig_sd[PSTWID*2-2:PSTWID]};
generate begin : gClz
  case(PSTWID)
  16: cntlz16 u1 (.i({sigi}), .o(lzcnt));
  20: cntlz24 u1 (.i({sigi,4'hF}), .o(lzcnt));
  32: cntlz32 u1 (.i({sigi}), .o(lzcnt));
  40: cntlz48 u1 (.i({sigi,8'hFF}), .o(lzcnt));
  52: cntlz64 u1 (.i({sigi,12'hFFF}), .o(lzcnt));
  64: cntlz64 u1 (.i({sigi}), .o(lzcnt));
  80: cntlz80 u1 (.i({sigi}), .o(lzcnt));
  default: ;
  endcase
end
endgenerate

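// Shift the leading zeros out to normalize the significand.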
//positCntlz #(.PSTWID(PSTWID)) u3 (.i({|sigov,sig_sd[PSTWID-2:0]}), .o(lzcnt));
wire [PSTWID*2-1:0] sig_ls = sig_sd[PSTWID*2-1:0] << lzcnt;

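// Recompute the result scale ({regime,exponent}) from the larger operand's
// scale, adjusting for the normalization shift and any carry out of the sum,
// then split it back into regime sign/magnitude and exponent fields.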
wire [rs+1:0] absrgm1 = rgs1 ? rgm1 : -rgm1;  // rgs1 = 1 = positive
wire [es+rs+1:0] rxtmp;
wire [es+rs+1:0] rxtmp1;
wire srxtmp1;
wire [es+rs:0] abs_rxtmp;
wire [(es==0 ? 0 : es-1):0] expo;
wire [rs:0] rgmo;
generate begin : gEsz
  if (es > 0) begin
    assign rxtmp = {absrgm1,exp1} + es - {{es+1{1'b0}},lzcnt};
    assign rxtmp1 = rxtmp + sigov[1];  // add in overflow if any
    assign srxtmp1 = rxtmp1[es+rs+1];
    assign abs_rxtmp = srxtmp1 ? -rxtmp1 : rxtmp1;

    assign expo = (srxtmp1 & |abs_rxtmp[es-1:0]) ? rxtmp1[es-1:0] : abs_rxtmp[es-1:0];
    assign rgmo = (~srxtmp1 || (|abs_rxtmp[es-1:0])) ? abs_rxtmp[es+rs:es] + 1'b1 : abs_rxtmp[es+rs:es];
  end
  else begin
    assign rxtmp = absrgm1 - {{1{1'b0}},lzcnt};
    assign rxtmp1 = rxtmp + sigov[1];  // add in overflow if any
    assign srxtmp1 = rxtmp1[rs+1];
    assign abs_rxtmp = srxtmp1 ? -rxtmp1 : rxtmp1;
    assign expo = 1'b0;
    assign rgmo = (~srxtmp1) ? abs_rxtmp[rs:0] + 1'b1 : abs_rxtmp[rs:0];
  end
end
endgenerate

// Exponent and Significand Packing
reg [2*PSTWID-1+3:0] tmp;
always @*
case(es)
0:  tmp = { {PSTWID{~srxtmp1}}, srxtmp1, sig_ls[PSTWID*2-2:PSTWID-2], |sig_ls[PSTWID-3:0]};
1:  tmp = { {PSTWID{~srxtmp1}}, srxtmp1, expo, sig_ls[PSTWID*2-2:PSTWID-1], |sig_ls[PSTWID-2:0]};
2:  tmp = { {PSTWID{~srxtmp1}}, srxtmp1, expo, sig_ls[PSTWID*2-2:PSTWID], |sig_ls[PSTWID-1:0]};
default:  tmp = { {PSTWID{~srxtmp1}}, srxtmp1, expo, sig_ls[PSTWID*2-2:PSTWID+es-2], |sig_ls[PSTWID-1+es-2:0]};
endcase

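// Shifting right by the regime length moves a run of ~srxtmp1 bits,
// terminated by srxtmp1, in ahead of the exponent and fraction, forming the
// regime field.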
wire [3*PSTWID-1+3:0] tmp1 = {tmp,{PSTWID{1'b0}}} >> rgmo;

// Rounding
// Guard, Round, and Sticky
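// Round to nearest, ties to even: add one ulp when the guard bit is set and
// either a lower-order bit (round/sticky) or the current LSB is also set.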
wire L = tmp1[PSTWID+4], G = tmp1[PSTWID+3], R = tmp1[PSTWID+2], St = |tmp1[PSTWID+1:0],
     ulp = ((G & (R | St)) | (L & G & ~(R | St)));
wire [PSTWID-1:0] rnd_ulp = {{PSTWID-1{1'b0}},ulp};

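// Apply the rounding increment only while the regime leaves room for
// fraction bits; otherwise keep the truncated value.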
wire [PSTWID:0] tmp1_rnd_ulp = tmp1[2*PSTWID-1+3:PSTWID+3] + rnd_ulp;
wire [PSTWID-1:0] tmp1_rnd = (rgmo < PSTWID-es-2) ? tmp1_rnd_ulp[PSTWID-1:0] : tmp1[2*PSTWID-1+3:PSTWID+3];

// Compute output sign
always @*
casez ({zero,sa,op,sb})
4'b0000:  so = 1'b0;        // + + + = +
4'b0001:  so = !aa_gt_bb;   // + + - = sign of larger
4'b0010:  so = !aa_gt_bb;   // + - + = sign of larger
4'b0011:  so = 1'b0;        // + - - = +
4'b0100:  so = aa_gt_bb;    // - + + = sign of larger
4'b0101:  so = 1'b1;        // - + - = -
4'b0110:  so = 1'b1;        // - - + = -
4'b0111:  so = aa_gt_bb;    // - - - = sign of larger
4'b1???:  so = 1'b0;
endcase

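// Negative posit results are stored in two's-complement form, so negate the
// rounded magnitude when the output sign is set.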
wire [PSTWID-1:0] abs_tmp = so ? -tmp1_rnd : tmp1_rnd;

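// Final selection: if both operands are zero the result is zero, any
// infinite operand yields the infinity pattern (1 followed by zeros),
// otherwise pack the sign with the rounded magnitude.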
always @*
casez({zero,inf,sig_ls[PSTWID]&1'b0})
3'b1??:  o = {PSTWID{1'b0}};
3'b01?:  o = {1'b1,{PSTWID-1{1'b0}}};
3'b001:  o = {PSTWID{1'b0}};
default:  o = {so, abs_tmp[PSTWID-1:1]};
endcase

endmodule
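
// ----------------------------------------------------------------------------
// Minimal usage sketch, not part of the original source: it assumes PSTWID,
// es and rs come from the posit package as above, and that op = 0 requests an
// add and op = 1 a subtract. Guarded by a local define so it never affects
// synthesis of the module itself.
// ----------------------------------------------------------------------------
`ifdef POSIT_ADDSUB_SKETCH
module positAddsub_tb;
import posit::*;

reg op;
reg [PSTWID-1:0] a, b;
wire [PSTWID-1:0] o;

// Device under test
positAddsub u_addsub (.op(op), .a(a), .b(b), .o(o));

initial begin
  op = 1'b0;            // add
  a = {PSTWID{1'b0}};   // posit zero
  b = {PSTWID{1'b0}};   // posit zero
  #1 $display("a=%h b=%h o=%h", a, b, o);
end

endmodule
`endif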