OpenCores — sparc64soc Subversion repository
URL: https://opencores.org/ocsvn/sparc64soc/sparc64soc/trunk
File: sparc64soc/trunk/T1-CPU/exu/sparc_exu_ecl_wb.v (rev 2, author dmitryr)

// ========== Copyright Header Begin ==========================================
//
// OpenSPARC T1 Processor File: sparc_exu_ecl_wb.v
// Copyright (c) 2006 Sun Microsystems, Inc.  All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
//
// The above named program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public
// License version 2 as published by the Free Software Foundation.
//
// The above named program is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public
// License along with this work; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
//
// ========== Copyright Header End ============================================
////////////////////////////////////////////////////////////////////////
/*
//  Module Name: sparc_exu_ecl_wb
//      Description:  Implements the writeback logic for the exu.
//              This includes the control signals for the w1 and w2 input
//      muxes as well as keeping track of the wen signal for ALU ops.
*/
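
// Note on naming (inferred from the signal usage below, not stated in the
// original header): the suffixes _d, _e, _m and _w appear to track the
// decode, execute, memory and writeback stages, _w2 the second register
// file write port one cycle later, and _g the cycle in which long-latency
// (load, multiply, divide) results return.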

module sparc_exu_ecl_wb (/*AUTOARG*/
   // Outputs
   wb_ccr_wrccr_w, ecl_rml_cwp_wen_e, ecl_rml_cansave_wen_w,
   ecl_rml_canrestore_wen_w, ecl_rml_otherwin_wen_w,
   ecl_rml_wstate_wen_w, ecl_rml_cleanwin_wen_w, ecl_byp_sel_load_m,
   ecl_byp_sel_restore_m, ecl_byp_sel_pipe_m, ecl_byp_restore_m,
   ecl_irf_tid_m, ecl_irf_rd_m, ecl_irf_rd_g, ecl_irf_wen_w2,
   ecl_irf_tid_g, wb_e, bypass_m, ecl_irf_wen_w, ecl_byp_sel_load_g,
   ecl_byp_sel_muldiv_g, ecl_byp_sel_restore_g, wb_divcntl_ack_g,
   wb_ccr_setcc_g, ecl_byp_eclpr_e, exu_ifu_longop_done_g,
   ecl_div_yreg_wen_w, ecl_div_yreg_wen_g, ecl_div_yreg_shift_g,
   ecl_div_yreg_wen_l, wb_eccctl_spec_wen_next, bypass_w,
   wb_byplog_rd_w2, wb_byplog_tid_w2, wb_byplog_wen_w2,
   wb_byplog_rd_g2, wb_byplog_wen_g2, read_yreg_e,
   exu_ffu_wsr_inst_e,
   // Inputs
   clk, se, reset, sehold, ld_rd_g, ld_tid_g, lsu_exu_dfill_vld_g,
   lsu_exu_ldst_miss_g2, rd_m, tid_m, thr_m, tid_w1, ifu_exu_wen_d,
   ifu_exu_kill_e, ecl_exu_kill_m, rml_ecl_kill_m, ifu_tlu_flush_w,
   flush_w1, divcntl_wb_req_g, mdqctl_wb_divrd_g, mdqctl_wb_divthr_g,
   mdqctl_wb_mulrd_g, mdqctl_wb_multhr_g, mdqctl_wb_divsetcc_g,
   mdqctl_wb_mulsetcc_g, ecl_div_sel_div, ifu_tlu_wsr_inst_d,
   ifu_tlu_sraddr_d, rml_ecl_cwp_d, rml_ecl_cansave_d,
   rml_ecl_canrestore_d, rml_ecl_otherwin_d, rml_ecl_wstate_d,
   rml_ecl_cleanwin_d, exu_ifu_cc_d, rml_ecl_swap_done,
   rml_ecl_rmlop_done_e, mdqctl_wb_yreg_wen_g,
   mdqctl_wb_yreg_shift_g, ecl_byp_sel_ecc_m, eccctl_wb_rd_m,
   ifu_exu_inst_vld_e, ifu_exu_inst_vld_w, ifu_exu_return_d,
   restore_e, rml_ecl_fill_e, early_flush_w, ecl_byp_ldxa_g
   ) ;
   input clk;
   input se;
   input reset;
   input sehold;
   input [4:0] ld_rd_g;
   input [1:0] ld_tid_g;
   input       lsu_exu_dfill_vld_g;
   input        lsu_exu_ldst_miss_g2;
   input [4:0]  rd_m;
   input [1:0]  tid_m;
   input [3:0]  thr_m;
   input [1:0]  tid_w1;
   input        ifu_exu_wen_d;
   input        ifu_exu_kill_e;
   input        ecl_exu_kill_m;
   input        rml_ecl_kill_m; // kill from spill or fill trap
   input        ifu_tlu_flush_w;
   input        flush_w1;
   input        divcntl_wb_req_g;
   input [4:0]  mdqctl_wb_divrd_g;
   input [1:0]  mdqctl_wb_divthr_g;
   input [4:0]  mdqctl_wb_mulrd_g;
   input [1:0]  mdqctl_wb_multhr_g;
   input        mdqctl_wb_divsetcc_g;
   input        mdqctl_wb_mulsetcc_g;
   input        ecl_div_sel_div;
   input        ifu_tlu_wsr_inst_d;
   input [6:0] ifu_tlu_sraddr_d;
   input [2:0] rml_ecl_cwp_d;
   input [2:0] rml_ecl_cansave_d;
   input [2:0] rml_ecl_canrestore_d;
   input [2:0] rml_ecl_otherwin_d;
   input [5:0] rml_ecl_wstate_d;
   input [2:0] rml_ecl_cleanwin_d;
   input [7:0] exu_ifu_cc_d;
   input [3:0] rml_ecl_swap_done;
   input       rml_ecl_rmlop_done_e;
   input         mdqctl_wb_yreg_wen_g;
   input         mdqctl_wb_yreg_shift_g;
   input         ecl_byp_sel_ecc_m;
   input  [4:0] eccctl_wb_rd_m;
   input        ifu_exu_inst_vld_e;
   input        ifu_exu_inst_vld_w;
   input        ifu_exu_return_d;
   input  restore_e;
   input  rml_ecl_fill_e;
   input  early_flush_w;
   input        ecl_byp_ldxa_g;

   output      wb_ccr_wrccr_w;
   output      ecl_rml_cwp_wen_e;
   output      ecl_rml_cansave_wen_w;
   output      ecl_rml_canrestore_wen_w;
   output      ecl_rml_otherwin_wen_w;
   output      ecl_rml_wstate_wen_w;
   output      ecl_rml_cleanwin_wen_w;
   output      ecl_byp_sel_load_m;
   output      ecl_byp_sel_restore_m;
   output      ecl_byp_sel_pipe_m;
   output      ecl_byp_restore_m;
   output [1:0] ecl_irf_tid_m;
   output [4:0] ecl_irf_rd_m;
   output [4:0] ecl_irf_rd_g;
   output       ecl_irf_wen_w2;
   output [1:0] ecl_irf_tid_g;
   output       wb_e;
   output       bypass_m;
   output       ecl_irf_wen_w;
   output       ecl_byp_sel_load_g;
   output       ecl_byp_sel_muldiv_g;
   output       ecl_byp_sel_restore_g;
   output       wb_divcntl_ack_g;
   output       wb_ccr_setcc_g;
   output [7:0] ecl_byp_eclpr_e;
   output [3:0]  exu_ifu_longop_done_g;
   output [3:0]  ecl_div_yreg_wen_w;
   output [3:0]  ecl_div_yreg_wen_g;
   output [3:0]  ecl_div_yreg_shift_g;
   output [3:0]  ecl_div_yreg_wen_l;// w or w2 or shift
   output        wb_eccctl_spec_wen_next;
   output        bypass_w;
   output [4:0] wb_byplog_rd_w2;
   output [1:0] wb_byplog_tid_w2;
   output       wb_byplog_wen_w2;
   output [4:0] wb_byplog_rd_g2;
   output       wb_byplog_wen_g2;
   output       read_yreg_e;
   output       exu_ffu_wsr_inst_e;

   wire          wb_e;
   wire          wb_m;
   wire          wb_w;
   wire          inst_vld_noflush_wen_m;
   wire          inst_vld_noflush_wen_w;
   wire       ecl_irf_wen_g;
   wire      yreg_wen_w;
   wire      yreg_wen_w1;
   wire      yreg_wen_w1_vld;
   wire      wen_no_inst_vld_m;         // load or restore or ce wen
   wire        wen_no_inst_vld_w;
   wire        wen_w_inst_vld;
   wire        valid_e;
   wire        valid_m;
   wire    valid_w;
   wire    ecl_sel_mul_g;
   wire    ecl_sel_div_g;
   wire [1:0] muldiv_tid;
   wire        setcc_g;        // without wen from divcntl
   wire    wrsr_e;
   wire    wrsr_m;
   wire    wrsr_w;
   wire    [6:0] sraddr_e;
   wire    [6:0] sraddr_m;
   wire    [6:0] sraddr_w;
   wire    sraddr_ccr_w;
   wire    sraddr_y_w;
   wire    sraddr_cwp_e;
   wire    sraddr_cansave_w;
   wire    sraddr_canrestore_w;
   wire    sraddr_cleanwin_w;
   wire    sraddr_otherwin_w;
   wire    sraddr_wstate_w;
   wire    sel_cleanwin_d;
   wire    sel_otherwin_d;
   wire    sel_wstate_d;
   wire    sel_canrestore_d;
   wire    sel_ccr_d;
   wire    sel_cansave_d;
   wire    sel_cwp_d;
   wire    sel_rdpr_mux1_d;
   wire [2:0] rdpr_mux1_out;
   wire [7:0] rdpr_mux2_out;
   wire [3:0] muldiv_done_g;
   wire [3:0]    multhr_dec_g;
   wire [3:0]    divthr_dec_g;
   wire [3:0]    thrdec_w1;
   wire   short_longop_done_e;
   wire   short_longop_done_m;
   wire [3:0] short_longop_done;
   wire       return_e;
   wire   restore_m;
   wire   restore_w;
   wire   vld_restore_e;
   wire   vld_restore_w;
   wire   restore_request;
   wire   restore_wen;
   wire   restore_ready;
   wire   restore_ready_next;
   wire   restore_picked;
   wire [3:0]  restore_done;
   wire [1:0] restore_tid;
   wire [4:0] restore_rd;
   wire [3:0] restore_thr;
   wire [3:0] ecl_longop_done_kill_m;
   wire [3:0] ecl_longop_done_nokill_m;
   wire       dfill_vld_g2;
   wire       ld_g;
   wire       ld_g2;
   wire [1:0] dfill_tid_g2;
   wire [4:0] dfill_rd_g2;
   wire       kill_ld_g2;
   wire [1:0] tid_w2;
   wire [4:0] rd_w2;

   ////////////////////////////////////////////
   // Pass along result of load for one cycle
   ////////////////////////////////////////////
   assign     ld_g = lsu_exu_dfill_vld_g | ecl_byp_ldxa_g;
   dff_s dfill_vld_dff (.din(ld_g), .clk(clk), .q(ld_g2),
                      .se(se), .si(), .so());
   assign     kill_ld_g2 = flush_w1 & (dfill_tid_g2[1:0] == tid_w1[1:0]);
   assign     dfill_vld_g2 = ld_g2 & ~kill_ld_g2 & ~lsu_exu_ldst_miss_g2;
   dff_s #(2) dfill_tid_dff(.din(ld_tid_g[1:0]), .clk(clk), .q(dfill_tid_g2[1:0]),
                          .se(se), .si(), .so());
   dff_s #(5) dfill_rd_dff(.din(ld_rd_g[4:0]), .clk(clk), .q(dfill_rd_g2[4:0]),
                         .se(se), .si(), .so());
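   // Note: as coded above, a load result is held one extra cycle (g2) and
   // its write is dropped if that thread was flushed in w1 (kill_ld_g2) or
   // the load turned out to miss (lsu_exu_ldst_miss_g2).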

   ///////////////////////////////////////////
   // Help with bypassing of long latency ops
   ///////////////////////////////////////////
   assign       wb_byplog_rd_w2[4:0] = rd_w2[4:0];
   assign       wb_byplog_wen_w2 = ecl_irf_wen_w2;
   assign       wb_byplog_tid_w2[1:0] = tid_w2[1:0];
   assign       wb_byplog_rd_g2[4:0] = dfill_rd_g2[4:0];
   assign       wb_byplog_wen_g2 = ld_g2;


   ////////////////////////////////////////////////////////////////
   // G selection logic (picks between LOAD and MUL/DIV outputs)
   ////////////////////////////////////////////////////////////////
   // select signals: priority LOAD, RESTORE, MUL, DIV
   assign      ecl_byp_sel_load_g = (ld_g2 & (wb_m | wrsr_m | ecl_byp_sel_ecc_m));
   assign      ecl_byp_sel_restore_g = restore_request & ((wb_m | wrsr_m | ecl_byp_sel_ecc_m) ^ ld_g2);
   assign      ecl_byp_sel_muldiv_g = ~(ecl_byp_sel_load_g | ecl_byp_sel_restore_g);
   assign      ecl_sel_mul_g = ~ecl_div_sel_div & ecl_byp_sel_muldiv_g;
   assign      ecl_sel_div_g = ecl_div_sel_div & ecl_byp_sel_muldiv_g;
   assign      wb_divcntl_ack_g = ecl_byp_sel_muldiv_g;
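   // Note: the _g selects above appear to route a held load (ld_g2) or a
   // pending restore onto the W2 port only when the W1 port is taken by the
   // pipe (wb_m), a WRSR or an ECC writeback; otherwise the _m selects in
   // the W1 port control below take it. Mul/div results always complete
   // through this W2 port.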

   assign      muldiv_tid[1:0] = (ecl_div_sel_div)? mdqctl_wb_divthr_g[1:0]: mdqctl_wb_multhr_g[1:0];
   assign muldiv_done_g[3] = ((wb_divcntl_ack_g & divcntl_wb_req_g) &
                              muldiv_tid[1] & muldiv_tid[0]);
   assign muldiv_done_g[2] = ((wb_divcntl_ack_g & divcntl_wb_req_g) &
                              muldiv_tid[1] & ~muldiv_tid[0]);
   assign muldiv_done_g[1] = ((wb_divcntl_ack_g & divcntl_wb_req_g) &
                              ~muldiv_tid[1] & muldiv_tid[0]);
   assign muldiv_done_g[0] = ((wb_divcntl_ack_g & divcntl_wb_req_g) &
                              ~muldiv_tid[1] & ~muldiv_tid[0]);

   assign ecl_irf_wen_g = (sehold)? ecl_irf_wen_w2:
                                   (ecl_byp_sel_load_g & dfill_vld_g2 |
                                    (ecl_byp_sel_restore_g & restore_wen) |
                                    (ecl_byp_sel_muldiv_g & divcntl_wb_req_g));

   dff_s wen_w2_dff(.din(ecl_irf_wen_g), .clk(clk), .q(ecl_irf_wen_w2),
                  .se(se), .si(), .so());
   mux4ds #(5) rd_g_mux(.dout(ecl_irf_rd_g[4:0]), .in0(dfill_rd_g2[4:0]),
                       .in1(mdqctl_wb_divrd_g[4:0]),
                       .in2(mdqctl_wb_mulrd_g[4:0]),
                        .in3(restore_rd[4:0]),
                       .sel0(ecl_byp_sel_load_g),
                       .sel1(ecl_sel_div_g),
                        .sel2(ecl_sel_mul_g),
                        .sel3(ecl_byp_sel_restore_g));
   mux4ds #(2) thr_g_mux(.dout(ecl_irf_tid_g[1:0]), .in0(dfill_tid_g2[1:0]),
                        .in1(mdqctl_wb_divthr_g[1:0]),
                        .in2(mdqctl_wb_multhr_g[1:0]),
                         .in3(restore_tid[1:0]),
                        .sel0(ecl_byp_sel_load_g),
                        .sel1(ecl_sel_div_g),
                         .sel2(ecl_sel_mul_g),
                         .sel3(ecl_byp_sel_restore_g));
   mux2ds setcc_g_mux(.dout(setcc_g),
                         .in0(mdqctl_wb_mulsetcc_g),
                         .in1(mdqctl_wb_divsetcc_g),
                         .sel0(~ecl_div_sel_div),
                         .sel1(ecl_div_sel_div));
   dff_s #(2) dff_thr_g2w2(.din(ecl_irf_tid_g[1:0]), .clk(clk), .q(tid_w2[1:0]), .se(se),
                      .si(), .so());
   dff_s #(5) dff_rd_g2w2(.din(ecl_irf_rd_g[4:0]), .clk(clk), .q(rd_w2[4:0]), .se(se),
                     .si(), .so());
   // needs wen to setcc
   assign wb_ccr_setcc_g = wb_divcntl_ack_g & divcntl_wb_req_g & setcc_g;


   ///////////////////
   // W1 port control
   ///////////////////
   // sehold will turn off in pipe writes and put the hold functionality through
   // the non inst_vld part
   // Mux between load and ALU for rd, thr, and wen
   assign      ecl_byp_sel_load_m = ~(wb_m | wrsr_m | ecl_byp_sel_ecc_m) & ld_g2;
   assign      ecl_byp_sel_pipe_m = (wb_m | wrsr_m) & ~ecl_byp_sel_ecc_m;
   assign      ecl_byp_sel_restore_m = ~(wb_m | wrsr_m | ld_g2 | ecl_byp_sel_ecc_m);
   assign      wen_no_inst_vld_m = (sehold)? ecl_irf_wen_w:
                                             ((dfill_vld_g2 & ecl_byp_sel_load_m) |
                                              (ecl_byp_sel_restore_m & restore_wen));
   dff_s dff_lsu_wen_m2w(.din(wen_no_inst_vld_m), .clk(clk), .q(wen_no_inst_vld_w), .se(se), .si(),
                       .so());
   // ecc_wen must be kept separate because it needs to check inst_vld but not flush
   assign      inst_vld_noflush_wen_m = ecl_byp_sel_ecc_m & ~sehold;
   dff_s ecc_wen_m2w(.din(inst_vld_noflush_wen_m), .clk(clk), .q(inst_vld_noflush_wen_w), .se(se), .si(), .so());

   assign ecl_irf_tid_m[1:0] = ((ecl_byp_sel_load_m)? dfill_tid_g2[1:0]:
                                (ecl_byp_sel_restore_m)? restore_tid[1:0]:
                                tid_m[1:0]);

   mux4ds #(5) rd_mux(.dout(ecl_irf_rd_m[4:0]),
                      .in0(rd_m[4:0]),
                      .in1(dfill_rd_g2[4:0]),
                      .in2(eccctl_wb_rd_m[4:0]),
                      .in3(restore_rd[4:0]),
                      .sel0(ecl_byp_sel_pipe_m),
                      .sel1(ecl_byp_sel_load_m),
                      .sel2(ecl_byp_sel_ecc_m),
                      .sel3(ecl_byp_sel_restore_m));
   assign wen_w_inst_vld = valid_w | inst_vld_noflush_wen_w;
   assign ecl_irf_wen_w = ifu_exu_inst_vld_w & wen_w_inst_vld | wen_no_inst_vld_w;

   // bypass valid logic and flops
   dff_s dff_wb_d2e(.din(ifu_exu_wen_d), .clk(clk), .q(wb_e), .se(se),
                  .si(), .so());
   dff_s dff_wb_e2m(.din(valid_e), .clk(clk), .q(wb_m), .se(se),
                  .si(), .so());
   dffr_s dff_wb_m2w(.din(valid_m), .clk(clk), .q(wb_w), .se(se),
                  .si(), .so(), .rst(reset));
   assign  valid_e = wb_e & ~ifu_exu_kill_e & ~restore_e & ~wrsr_e;// restore doesn't finish on time
   assign  bypass_m = wb_m;// bypass doesn't need to check for traps or sehold
   assign  valid_m = bypass_m & ~rml_ecl_kill_m & ~sehold;// sehold turns off writes from this path
   assign  valid_w = (wb_w & ~early_flush_w & ~ifu_tlu_flush_w);// check inst_vld later
   // don't check flush for bypass
   assign  bypass_w = wb_w | inst_vld_noflush_wen_w | wen_no_inst_vld_w;

   // speculative wen for ecc injection
   assign  wb_eccctl_spec_wen_next = valid_m | dfill_vld_g2 | restore_request |  divcntl_wb_req_g;

   ///////////////////////////////////////////////////////
   // Privileged register read and write flops and logic
   ///////////////////////////////////////////////////////
/* -----\/----- EXCLUDED -----\/-----
   Decoded sraddr
   sraddr[5] = 1-priv, 0-state
   Y -   0
   CCR - 2
   CWP - 9
   CANSAVE - a
   CANRESTORE - b
   CLEANWIN - c
   OTHERWIN - d
   WSTATE - e
   GSR - 0x13
 -----/\----- EXCLUDED -----/\----- */
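   // For example, with the encoding above: CWP is privileged register 9, so
   // its sraddr is {1'b0, 1'b1 (priv), 5'h09} = 7'b010_1001, which is the
   // bit pattern matched by sraddr_cwp_e below; the other decodes follow
   // the same scheme.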
   assign  ecl_rml_cwp_wen_e = sraddr_cwp_e & wrsr_e;
   assign  sraddr_cwp_e = ~sraddr_e[6] & sraddr_e[5] & ~sraddr_e[4] & sraddr_e[3] & ~sraddr_e[2] &
           ~sraddr_e[1] & sraddr_e[0];

   assign  sraddr_y_w = ~sraddr_w[6] & ~sraddr_w[5] & ~sraddr_w[4] & ~sraddr_w[3] & ~sraddr_w[2] &
           ~sraddr_w[1] & ~sraddr_w[0];
   assign  sraddr_ccr_w = ~sraddr_w[6] & ~sraddr_w[5] & ~sraddr_w[4] & ~sraddr_w[3] & ~sraddr_w[2] &
           sraddr_w[1] & ~sraddr_w[0];
   assign  sraddr_cansave_w = ~sraddr_w[6] & sraddr_w[5] & ~sraddr_w[4] & sraddr_w[3] & ~sraddr_w[2] &
           sraddr_w[1] & ~sraddr_w[0];
   assign  sraddr_canrestore_w = ~sraddr_w[6] & sraddr_w[5] & ~sraddr_w[4] & sraddr_w[3] & ~sraddr_w[2] &
           sraddr_w[1] & sraddr_w[0];
   assign  sraddr_cleanwin_w = ~sraddr_w[6] & sraddr_w[5] & ~sraddr_w[4] & sraddr_w[3] & sraddr_w[2] &
           ~sraddr_w[1] & ~sraddr_w[0];
   assign  sraddr_otherwin_w = ~sraddr_w[6] & sraddr_w[5] & ~sraddr_w[4] & sraddr_w[3] & sraddr_w[2] &
           ~sraddr_w[1] & sraddr_w[0];
   assign  sraddr_wstate_w = ~sraddr_w[6] & sraddr_w[5] & ~sraddr_w[4] & sraddr_w[3] & sraddr_w[2] &
           sraddr_w[1] & ~sraddr_w[0];

   // yreg writes cycle after w and checks flush in that cycle
   assign  yreg_wen_w = sraddr_y_w & wrsr_w & ifu_exu_inst_vld_w;
   assign  yreg_wen_w1_vld = yreg_wen_w1 & ~flush_w1;

   // controls for all other writes (and flush checks) are in their respective blocks
   assign  wb_ccr_wrccr_w = sraddr_ccr_w & wrsr_w;
   assign  ecl_rml_cansave_wen_w = sraddr_cansave_w & wrsr_w;
   assign  ecl_rml_canrestore_wen_w = sraddr_canrestore_w & wrsr_w;
   assign  ecl_rml_cleanwin_wen_w = sraddr_cleanwin_w & wrsr_w;
   assign  ecl_rml_otherwin_wen_w = sraddr_otherwin_w & wrsr_w;
   assign  ecl_rml_wstate_wen_w = sraddr_wstate_w & wrsr_w;


   dff_s dff_wrsr_d2e(.din(ifu_tlu_wsr_inst_d), .clk(clk), .q(wrsr_e), .se(se),
                   .si(), .so());
   assign  exu_ffu_wsr_inst_e = wrsr_e;
   dff_s dff_wrsr_e2m(.din(wrsr_e), .clk(clk), .q(wrsr_m), .se(se),
                   .si(), .so());
   dff_s dff_wrsr_m2w(.din(wrsr_m), .clk(clk), .q(wrsr_w), .se(se),
                   .si(), .so());
   dff_s #(7) dff_sraddr_d2e(.din(ifu_tlu_sraddr_d[6:0]), .clk(clk), .q(sraddr_e[6:0]), .se(se),
                       .si(), .so());
   dff_s #(7) dff_sraddr_e2m(.din(sraddr_e[6:0]), .clk(clk), .q(sraddr_m[6:0]), .se(se),
                       .si(), .so());
   dff_s #(7) dff_sraddr_m2w(.din(sraddr_m[6:0]), .clk(clk), .q(sraddr_w[6:0]), .se(se),
                       .si(), .so());
   dff_s dff_yreg_wen_w2w1(.din(yreg_wen_w), .clk(clk), .q(yreg_wen_w1), .se(se), .si(), .so());

   // Logic for rdpr/rdsr
   // This mux takes advantage of the fact that these 4 encodings don't overlap
   assign sel_cleanwin_d = ~ifu_tlu_sraddr_d[1] & ~ifu_tlu_sraddr_d[0];
   assign sel_otherwin_d = ~ifu_tlu_sraddr_d[1] & ifu_tlu_sraddr_d[0];
   assign sel_cansave_d = ifu_tlu_sraddr_d[1] & ~ifu_tlu_sraddr_d[0];
   assign sel_canrestore_d = ifu_tlu_sraddr_d[1] & ifu_tlu_sraddr_d[0];
   mux4ds #(3) rdpr_mux1(.dout(rdpr_mux1_out[2:0]),
                       .in0(rml_ecl_canrestore_d[2:0]),
                       .in1(rml_ecl_cleanwin_d[2:0]),
                       .in2(rml_ecl_cansave_d[2:0]),
                       .in3(rml_ecl_otherwin_d[2:0]),
                       .sel0(sel_canrestore_d),
                       .sel1(sel_cleanwin_d),
                       .sel2(sel_cansave_d),
                       .sel3(sel_otherwin_d));
   assign sel_ccr_d = ~ifu_tlu_sraddr_d[3];
   assign sel_cwp_d = ifu_tlu_sraddr_d[3] & ~ifu_tlu_sraddr_d[2] & ~ifu_tlu_sraddr_d[1] & ifu_tlu_sraddr_d[0];
   assign sel_wstate_d = ifu_tlu_sraddr_d[3] & ifu_tlu_sraddr_d[2] & ifu_tlu_sraddr_d[1] & ~ifu_tlu_sraddr_d[0];
   assign sel_rdpr_mux1_d = ~(sel_ccr_d | sel_cwp_d | sel_wstate_d);
   mux4ds #(8) rdpr_mux2(.dout(rdpr_mux2_out[7:0]),
                       .in0(exu_ifu_cc_d[7:0]),
                       .in1({5'b0, rml_ecl_cwp_d[2:0]}),
                       .in2({2'b0, rml_ecl_wstate_d[5:0]}),
                       .in3({5'b0, rdpr_mux1_out[2:0]}),
                       .sel0(sel_ccr_d),
                       .sel1(sel_cwp_d),
                       .sel2(sel_wstate_d),
                       .sel3(sel_rdpr_mux1_d));

   assign read_yreg_e = ~(sraddr_e[3] | sraddr_e[1]);
   dff_s #(8) rdpr_dff(.din(rdpr_mux2_out[7:0]), .clk(clk), .q(ecl_byp_eclpr_e[7:0]),
                   .se(se), .si(), .so());


   ///////////////////////////////
   // YREG write enable logic
   ///////////////////////////////
   // decode thr_g for mux select
   assign multhr_dec_g[0] = ~mdqctl_wb_multhr_g[1] & ~mdqctl_wb_multhr_g[0];
   assign multhr_dec_g[1] = ~mdqctl_wb_multhr_g[1] & mdqctl_wb_multhr_g[0];
   assign multhr_dec_g[2] = mdqctl_wb_multhr_g[1] & ~mdqctl_wb_multhr_g[0];
   assign multhr_dec_g[3] = mdqctl_wb_multhr_g[1] & mdqctl_wb_multhr_g[0];

   assign divthr_dec_g[0] = ~mdqctl_wb_divthr_g[1] & ~mdqctl_wb_divthr_g[0];
   assign divthr_dec_g[1] = ~mdqctl_wb_divthr_g[1] & mdqctl_wb_divthr_g[0];
   assign divthr_dec_g[2] = mdqctl_wb_divthr_g[1] & ~mdqctl_wb_divthr_g[0];
   assign divthr_dec_g[3] = mdqctl_wb_divthr_g[1] & mdqctl_wb_divthr_g[0];

   assign thrdec_w1[0] = ~tid_w1[1] & ~tid_w1[0];
   assign thrdec_w1[1] = ~tid_w1[1] & tid_w1[0];
   assign thrdec_w1[2] = tid_w1[1] & ~tid_w1[0];
   assign thrdec_w1[3] = tid_w1[1] & tid_w1[0];

   // enable input for each thread
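   // (priority per thread, as written below: a divide yreg shift wins over a
   //  mul/div yreg write in g, which wins over a WRY reaching w1; the _l
   //  term is the complement and presumably selects the hold value in the
   //  divide block's yreg mux)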

   assign ecl_div_yreg_shift_g[0] = divthr_dec_g[0] & mdqctl_wb_yreg_shift_g;
   assign ecl_div_yreg_wen_w[0] = (thrdec_w1[0] & yreg_wen_w1_vld &
                                   ~ecl_div_yreg_shift_g[0] &
                                   ~ecl_div_yreg_wen_g[0]);
   assign ecl_div_yreg_wen_g[0] = (multhr_dec_g[0] & mdqctl_wb_yreg_wen_g &
                                   ~ecl_div_yreg_shift_g[0]);
   assign ecl_div_yreg_wen_l[0] = ~(ecl_div_yreg_wen_w[0] | ecl_div_yreg_wen_g[0]
                                    | ecl_div_yreg_shift_g[0]);
   assign ecl_div_yreg_shift_g[1] = divthr_dec_g[1] & mdqctl_wb_yreg_shift_g;
   assign ecl_div_yreg_wen_w[1] = (thrdec_w1[1] & yreg_wen_w1_vld &
                                   ~ecl_div_yreg_shift_g[1] &
                                   ~ecl_div_yreg_wen_g[1]);
   assign ecl_div_yreg_wen_g[1] = (multhr_dec_g[1] & mdqctl_wb_yreg_wen_g &
                                   ~ecl_div_yreg_shift_g[1]);
   assign ecl_div_yreg_wen_l[1] = ~(ecl_div_yreg_wen_w[1] | ecl_div_yreg_wen_g[1]
                                    | ecl_div_yreg_shift_g[1]);
   assign ecl_div_yreg_shift_g[2] = divthr_dec_g[2] & mdqctl_wb_yreg_shift_g;
   assign ecl_div_yreg_wen_w[2] = (thrdec_w1[2] & yreg_wen_w1_vld &
                                   ~ecl_div_yreg_shift_g[2] &
                                   ~ecl_div_yreg_wen_g[2]);
   assign ecl_div_yreg_wen_g[2] = (multhr_dec_g[2] & mdqctl_wb_yreg_wen_g &
                                   ~ecl_div_yreg_shift_g[2]);
   assign ecl_div_yreg_wen_l[2] = ~(ecl_div_yreg_wen_w[2] | ecl_div_yreg_wen_g[2]
                                    | ecl_div_yreg_shift_g[2]);
   assign ecl_div_yreg_shift_g[3] = divthr_dec_g[3] & mdqctl_wb_yreg_shift_g;
   assign ecl_div_yreg_wen_w[3] = (thrdec_w1[3] & yreg_wen_w1_vld &
                                   ~ecl_div_yreg_shift_g[3] &
                                   ~ecl_div_yreg_wen_g[3]);
   assign ecl_div_yreg_wen_g[3] = (multhr_dec_g[3] & mdqctl_wb_yreg_wen_g &
                                   ~ecl_div_yreg_shift_g[3]);
   assign ecl_div_yreg_wen_l[3] = ~(ecl_div_yreg_wen_w[3] | ecl_div_yreg_wen_g[3]
                                    | ecl_div_yreg_shift_g[3]);

505
   //////////////////////////////////////////////////////////
506
   // Completion logic for restore
507
   //////////////////////////////////////////////////////////
508
 
509
   // only worry about restores.  Returns are automatically switched back in
510
   assign ecl_byp_restore_m = restore_m;
511
   assign vld_restore_e = restore_e & wb_e & ~return_e & ~rml_ecl_fill_e & ifu_exu_inst_vld_e;
512
   assign vld_restore_w = (restore_w & ~ifu_tlu_flush_w & ~early_flush_w
513
                           & ifu_exu_inst_vld_w & ~reset);
514
 
515
   assign restore_request = restore_w | restore_ready;
516
   assign restore_wen = vld_restore_w | restore_ready;
517
   assign restore_picked = ecl_byp_sel_restore_m | ecl_byp_sel_restore_g;
518
   assign restore_done[3:0] = restore_thr[3:0] & {4{restore_picked & restore_request}};
519
   // restore request waits for kills in the w stage.  they
520
   // won't start until after the flop
521
   assign restore_ready_next = (vld_restore_w  | restore_ready) & ~restore_picked;
522
 
523
   dffe_s #(2) restore_tid_dff(.din(tid_m[1:0]), .clk(clk), .q(restore_tid[1:0]),
524
                             .se(se), .si(), .so(), .en(restore_m));
525
   dffe_s #(5) restore_rd_dff(.din(rd_m[4:0]), .clk(clk), .q(restore_rd[4:0]),
526
                            .se(se), .si(), .so(), .en(restore_m));
527
   dff_s return_d2e(.din(ifu_exu_return_d), .clk(clk), .q(return_e),
528
                   .se(se), .si(), .so());
529
   dff_s restore_e2m(.din(vld_restore_e), .clk(clk), .q(restore_m),
530
                   .se(se), .si(), .so());
531
   dff_s restore_m2w(.din(restore_m), .clk(clk), .q(restore_w),
532
                   .se(se), .si(), .so());
533
   dff_s restore_ready_dff(.din(restore_ready_next), .q(restore_ready),
534
                         .clk(clk), .se(se), .so(), .si());
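   // Note: a valid restore that is not picked immediately appears to park in
   // restore_ready until the W1 or W2 port selects it (restore_picked), at
   // which point restore_done pulses for that thread.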
535
 
536
   //////////////////////////////////////////////////////////
537
   // Completion logic for non integer-pipeline operations
538
   //////////////////////////////////////////////////////////
539
   // short_longops must check inst_vld_e to protect against invalid completion signal
540
   assign short_longop_done_e = (rml_ecl_rmlop_done_e | (restore_e & ~wb_e & ~return_e)) &
541
                                  ifu_exu_inst_vld_e & ~ifu_exu_kill_e;
542
   dff_s longop_done_e2m (.din(short_longop_done_e), .clk(clk), .q(short_longop_done_m), .se(se), .si(), .so());
543
   assign short_longop_done[3:0] = thr_m[3:0] & {4{short_longop_done_m}};
544
 
545
   assign ecl_longop_done_nokill_m[3:0] = (muldiv_done_g[3:0] | restore_done[3:0] | short_longop_done[3:0] |
546
                                           rml_ecl_swap_done[3:0]);
547
   assign ecl_longop_done_kill_m[3:0] = (muldiv_done_g[3:0] | restore_done[3:0] | rml_ecl_swap_done[3:0]);
548
   assign exu_ifu_longop_done_g[3:0] = (ecl_exu_kill_m)? ecl_longop_done_kill_m[3:0]: ecl_longop_done_nokill_m[3:0];
549
 
550
 
551
   // decode tid
552
   assign restore_thr[3] = restore_tid[1] & restore_tid[0];
553
   assign restore_thr[2] = restore_tid[1] & ~restore_tid[0];
554
   assign restore_thr[1] = ~restore_tid[1] & restore_tid[0];
555
   assign restore_thr[0] = ~restore_tid[1] & ~restore_tid[0];
556
 
557
endmodule // sparc_exu_ecl_wb
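
The 2-bit thread-id to one-hot decode above (restore_thr, thrdec_w1,
multhr_dec_g, divthr_dec_g) repeats the same four-line pattern. A minimal
standalone sketch of that pattern, with a hypothetical module name, purely
for reference:

module tid_onehot_dec (tid, thr_dec);
   input  [1:0] tid;      // 2-bit thread id
   output [3:0] thr_dec;  // one-hot per-thread select

   assign thr_dec[0] = ~tid[1] & ~tid[0];
   assign thr_dec[1] = ~tid[1] &  tid[0];
   assign thr_dec[2] =  tid[1] & ~tid[0];
   assign thr_dec[3] =  tid[1] &  tid[0];
endmodule // tid_onehot_dec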
