1 |
221 |
olivier.gi |
// (C) 2001-2014 Altera Corporation. All rights reserved.
|
2 |
|
|
// Your use of Altera Corporation's design tools, logic functions and other
|
3 |
|
|
// software and tools, and its AMPP partner logic functions, and any output
|
4 |
|
|
// files any of the foregoing (including device programming or simulation
|
5 |
|
|
// files), and any associated documentation or information are expressly subject
|
6 |
|
|
// to the terms and conditions of the Altera Program License Subscription
|
7 |
|
|
// Agreement, Altera MegaCore Function License Agreement, or other applicable
|
8 |
|
|
// license agreement, including, without limitation, that your use is for the
|
9 |
|
|
// sole purpose of programming logic devices manufactured by Altera and sold by
|
10 |
|
|
// Altera or its authorized distributors. Please refer to the applicable
|
11 |
|
|
// agreement for further details.
|
12 |
|
|
|
13 |
|
|
|
14 |
|
|
// $Id: //acds/rel/14.0/ip/merlin/altera_merlin_multiplexer/altera_merlin_multiplexer.sv.terp#1 $
|
15 |
|
|
// $Revision: #1 $
|
16 |
|
|
// $Date: 2014/02/16 $
|
17 |
|
|
// $Author: swbranch $
|
18 |
|
|
|
19 |
|
|
// ------------------------------------------
|
20 |
|
|
// Merlin Multiplexer
|
21 |
|
|
// ------------------------------------------
|
22 |
|
|
|
23 |
|
|
`timescale 1 ns / 1 ns
|
24 |
|
|
|
25 |
|
|
|
26 |
|
|
// ------------------------------------------
|
27 |
|
|
// Generation parameters:
|
28 |
|
|
// output_name: DE0_NANO_SOC_QSYS_mm_interconnect_0_rsp_mux_001
|
29 |
|
|
// NUM_INPUTS: 6
|
30 |
|
|
// ARBITRATION_SHARES: 1 1 1 1 1 1
|
31 |
|
|
// ARBITRATION_SCHEME "no-arb"
|
32 |
|
|
// PIPELINE_ARB: 0
|
33 |
|
|
// PKT_TRANS_LOCK: 60 (arbitration locking enabled)
|
34 |
|
|
// ST_DATA_W: 96
|
35 |
|
|
// ST_CHANNEL_W: 6
|
36 |
|
|
// ------------------------------------------
|
37 |
|
|
|
38 |
|
|
// ------------------------------------------
// Merlin response-network multiplexer (6:1).
//
// Funnels six Avalon-ST packet sinks onto one source. The winning
// sink is chosen by an external altera_merlin_arbitrator configured
// with the "no-arb" scheme (NUM_INPUTS parallel requests, no
// round-robin state, PIPELINE=0) and is held for the duration of a
// packet, or longer while the packet-level lock bit (data bit 60,
// PKT_TRANS_LOCK) is asserted by the granted sink.
//
// Interface contract (per sink and for the source):
//   valid/ready handshake, startofpacket/endofpacket framing,
//   96-bit data, 6-bit channel.
// ------------------------------------------
module DE0_NANO_SOC_QSYS_mm_interconnect_0_rsp_mux_001
(
    // ----------------------
    // Sinks
    // ----------------------
    input                sink0_valid,
    input  [96-1 : 0]    sink0_data,
    input  [6-1: 0]      sink0_channel,
    input                sink0_startofpacket,
    input                sink0_endofpacket,
    output               sink0_ready,

    input                sink1_valid,
    input  [96-1 : 0]    sink1_data,
    input  [6-1: 0]      sink1_channel,
    input                sink1_startofpacket,
    input                sink1_endofpacket,
    output               sink1_ready,

    input                sink2_valid,
    input  [96-1 : 0]    sink2_data,
    input  [6-1: 0]      sink2_channel,
    input                sink2_startofpacket,
    input                sink2_endofpacket,
    output               sink2_ready,

    input                sink3_valid,
    input  [96-1 : 0]    sink3_data,
    input  [6-1: 0]      sink3_channel,
    input                sink3_startofpacket,
    input                sink3_endofpacket,
    output               sink3_ready,

    input                sink4_valid,
    input  [96-1 : 0]    sink4_data,
    input  [6-1: 0]      sink4_channel,
    input                sink4_startofpacket,
    input                sink4_endofpacket,
    output               sink4_ready,

    input                sink5_valid,
    input  [96-1 : 0]    sink5_data,
    input  [6-1: 0]      sink5_channel,
    input                sink5_startofpacket,
    input                sink5_endofpacket,
    output               sink5_ready,

    // ----------------------
    // Source
    // ----------------------
    output               src_valid,
    output [96-1 : 0]    src_data,
    output [6-1 : 0]     src_channel,
    output               src_startofpacket,
    output               src_endofpacket,
    input                src_ready,

    // ----------------------
    // Clock & Reset
    // ----------------------
    input                clk,
    input                reset
);
    // Payload = channel (6) + data (96) + startofpacket + endofpacket.
    localparam PAYLOAD_W       = 96 + 6 + 2;
    localparam NUM_INPUTS      = 6;
    localparam SHARE_COUNTER_W = 1;
    localparam PIPELINE_ARB    = 0;
    localparam ST_DATA_W       = 96;
    localparam ST_CHANNEL_W    = 6;
    localparam PKT_TRANS_LOCK  = 60;   // bit index of the lock flag inside sink*_data

    // ------------------------------------------
    // Signals
    // ------------------------------------------
    wire [NUM_INPUTS - 1 : 0] request;
    wire [NUM_INPUTS - 1 : 0] valid;
    wire [NUM_INPUTS - 1 : 0] grant;        // current one-hot grant (combinational, = next_grant)
    wire [NUM_INPUTS - 1 : 0] next_grant;   // grant computed by the arbitrator
    reg  [NUM_INPUTS - 1 : 0] saved_grant;  // grant registered each cycle; used to detect a change of winner
    reg  [PAYLOAD_W - 1 : 0]  src_payload;
    wire                      last_cycle;   // final transfer cycle of the granted packet (lock not held)
    reg                       packet_in_progress;
    reg                       update_grant; // tells the arbitrator to advance to a new winner

    wire [PAYLOAD_W - 1 : 0] sink0_payload;
    wire [PAYLOAD_W - 1 : 0] sink1_payload;
    wire [PAYLOAD_W - 1 : 0] sink2_payload;
    wire [PAYLOAD_W - 1 : 0] sink3_payload;
    wire [PAYLOAD_W - 1 : 0] sink4_payload;
    wire [PAYLOAD_W - 1 : 0] sink5_payload;

    assign valid[0] = sink0_valid;
    assign valid[1] = sink1_valid;
    assign valid[2] = sink2_valid;
    assign valid[3] = sink3_valid;
    assign valid[4] = sink4_valid;
    assign valid[5] = sink5_valid;

    // ------------------------------------------
    // ------------------------------------------
    // Grant Logic & Updates
    // ------------------------------------------
    // ------------------------------------------

    // Per-sink arbitration-lock flag, taken from data bit PKT_TRANS_LOCK (60).
    // While the granted sink holds its lock bit high, the grant is not
    // released even at endofpacket.
    reg [NUM_INPUTS - 1 : 0] lock;
    always @* begin
        lock[0] = sink0_data[60];
        lock[1] = sink1_data[60];
        lock[2] = sink2_data[60];
        lock[3] = sink3_data[60];
        lock[4] = sink4_data[60];
        lock[5] = sink5_data[60];
    end

    // A packet truly ends only when endofpacket transfers AND the granted
    // sink is not holding its lock bit.
    assign last_cycle = src_valid & src_ready & src_endofpacket & ~(|(lock & grant));

    // ------------------------------------------
    // We're working on a packet at any time valid is high, except
    // when this is the endofpacket.
    // ------------------------------------------
    always @(posedge clk or posedge reset) begin
        if (reset) begin
            packet_in_progress <= 1'b0;
        end
        else begin
            if (last_cycle)
                packet_in_progress <= 1'b0;
            else if (src_valid)
                packet_in_progress <= 1'b1;
        end
    end

    // ------------------------------------------
    // Shares
    //
    // All arbitration shares are 1, so every counter load value is 0 and
    // the share machinery below reduces to constants (the synthesizer is
    // expected to optimize the counter away entirely).
    // ------------------------------------------
    // Input  |  arb shares  |  counter load value
    //   0    |      1       |         0
    //   1    |      1       |         0
    //   2    |      1       |         0
    //   3    |      1       |         0
    //   4    |      1       |         0
    //   5    |      1       |         0
    wire [SHARE_COUNTER_W - 1 : 0] share_0 = 1'd0;
    wire [SHARE_COUNTER_W - 1 : 0] share_1 = 1'd0;
    wire [SHARE_COUNTER_W - 1 : 0] share_2 = 1'd0;
    wire [SHARE_COUNTER_W - 1 : 0] share_3 = 1'd0;
    wire [SHARE_COUNTER_W - 1 : 0] share_4 = 1'd0;
    wire [SHARE_COUNTER_W - 1 : 0] share_5 = 1'd0;

    // ------------------------------------------
    // Choose the share value corresponding to the grant
    // (one-hot AND/OR select; constant 0 here).
    // ------------------------------------------
    reg [SHARE_COUNTER_W - 1 : 0] next_grant_share;
    always @* begin
        next_grant_share =
            share_0 & { SHARE_COUNTER_W {next_grant[0]} } |
            share_1 & { SHARE_COUNTER_W {next_grant[1]} } |
            share_2 & { SHARE_COUNTER_W {next_grant[2]} } |
            share_3 & { SHARE_COUNTER_W {next_grant[3]} } |
            share_4 & { SHARE_COUNTER_W {next_grant[4]} } |
            share_5 & { SHARE_COUNTER_W {next_grant[5]} };
    end

    // ------------------------------------------
    // Flag to indicate first packet of an arb sequence.
    // ------------------------------------------
    wire grant_changed = ~packet_in_progress && ~(|(saved_grant & valid));
    reg  first_packet_r;
    wire first_packet = grant_changed | first_packet_r;
    always @(posedge clk or posedge reset) begin
        if (reset) begin
            first_packet_r <= 1'b0;
        end
        else begin
            if (update_grant)
                first_packet_r <= 1'b1;
            else if (last_cycle)
                first_packet_r <= 1'b0;
            else if (grant_changed)
                first_packet_r <= 1'b1;
        end
    end

    // ------------------------------------------
    // Compute the next share-count value.
    // ------------------------------------------
    reg [SHARE_COUNTER_W - 1 : 0] p1_share_count;
    reg [SHARE_COUNTER_W - 1 : 0] share_count;
    reg                           share_count_zero_flag;

    always @* begin
        if (first_packet) begin
            p1_share_count = next_grant_share;
        end
        else begin
            // Update the counter, but don't decrement below 0.
            p1_share_count = share_count_zero_flag ? '0 : share_count - 1'b1;
        end
    end

    // ------------------------------------------
    // Update the share counter and share-counter=zero flag.
    // ------------------------------------------
    always @(posedge clk or posedge reset) begin
        if (reset) begin
            share_count           <= '0;
            share_count_zero_flag <= 1'b1;
        end
        else begin
            if (last_cycle) begin
                share_count           <= p1_share_count;
                share_count_zero_flag <= (p1_share_count == '0);
            end
        end
    end

    // ------------------------------------------
    // For each input, maintain a final_packet signal which goes active for the
    // last packet of a full-share packet sequence. With single shares, every
    // packet is the final packet, so these are constant 1.
    // ------------------------------------------
    wire final_packet_0 = 1'b1;
    wire final_packet_1 = 1'b1;
    wire final_packet_2 = 1'b1;
    wire final_packet_3 = 1'b1;
    wire final_packet_4 = 1'b1;
    wire final_packet_5 = 1'b1;

    // ------------------------------------------
    // Concatenate all final_packet signals (wire or reg) into a handy vector.
    // ------------------------------------------
    wire [NUM_INPUTS - 1 : 0] final_packet = {
        final_packet_5,
        final_packet_4,
        final_packet_3,
        final_packet_2,
        final_packet_1,
        final_packet_0
    };

    // ------------------------------------------
    // High when the granted input is on its final allotted packet.
    // ------------------------------------------
    wire p1_done = |(final_packet & grant);

    // ------------------------------------------
    // Flag for the first cycle of packets within an
    // arb sequence
    // ------------------------------------------
    reg first_cycle;
    always @(posedge clk, posedge reset) begin
        if (reset)
            first_cycle <= 0;
        else
            first_cycle <= last_cycle && ~p1_done;
    end

    // ------------------------------------------
    // No arbitration pipeline: a new grant may be taken whenever the
    // current packet ends (all shares are 1, so every packet exhausts
    // the winner's shares).
    //
    // NOTE(fix): the generated template emitted two additional
    // unconditional blocking assignments here (`update_grant = 0;` and
    // `update_grant = (last_cycle && p1_done) || (first_cycle && ~(|valid));`)
    // that were immediately overwritten by the final assignment below —
    // last-assignment-wins in a combinational block. The dead assignments
    // have been removed; behavior is unchanged.
    // ------------------------------------------
    always @* begin
        update_grant = last_cycle;
    end

    // With PIPELINE_ARB = 0 the grant is used combinationally and the
    // registered copy is refreshed every cycle.
    wire save_grant;
    assign save_grant = 1;
    assign grant      = next_grant;

    always @(posedge clk, posedge reset) begin
        if (reset)
            saved_grant <= '0;
        else if (save_grant)
            saved_grant <= next_grant;
    end

    // ------------------------------------------
    // ------------------------------------------
    // Arbitrator
    // ------------------------------------------
    // ------------------------------------------

    // ------------------------------------------
    // Create a request vector that stays high during
    // the packet for unpipelined arbitration.
    //
    // The pipelined arbitration scheme does not require
    // request to be held high during the packet.
    // ------------------------------------------
    assign request = valid;

    altera_merlin_arbitrator
    #(
        .NUM_REQUESTERS (NUM_INPUTS),
        .SCHEME         ("no-arb"),
        .PIPELINE       (0)
    ) arb (
        .clk                    (clk),
        .reset                  (reset),
        .request                (request),
        .grant                  (next_grant),
        .save_top_priority      (src_valid),
        .increment_top_priority (update_grant)
    );

    // ------------------------------------------
    // ------------------------------------------
    // Mux
    //
    // Implemented as a sum of products: each sink's payload is masked
    // by its (one-hot) grant bit and the results are OR-ed together.
    // ------------------------------------------
    // ------------------------------------------

    // Backpressure only reaches the granted sink.
    assign sink0_ready = src_ready && grant[0];
    assign sink1_ready = src_ready && grant[1];
    assign sink2_ready = src_ready && grant[2];
    assign sink3_ready = src_ready && grant[3];
    assign sink4_ready = src_ready && grant[4];
    assign sink5_ready = src_ready && grant[5];

    assign src_valid = |(grant & valid);

    always @* begin
        src_payload =
            sink0_payload & {PAYLOAD_W {grant[0]} } |
            sink1_payload & {PAYLOAD_W {grant[1]} } |
            sink2_payload & {PAYLOAD_W {grant[2]} } |
            sink3_payload & {PAYLOAD_W {grant[3]} } |
            sink4_payload & {PAYLOAD_W {grant[4]} } |
            sink5_payload & {PAYLOAD_W {grant[5]} };
    end

    // ------------------------------------------
    // Mux Payload Mapping
    // (must match the unpacking of src_payload below)
    // ------------------------------------------
    assign sink0_payload = {sink0_channel,sink0_data,
                            sink0_startofpacket,sink0_endofpacket};
    assign sink1_payload = {sink1_channel,sink1_data,
                            sink1_startofpacket,sink1_endofpacket};
    assign sink2_payload = {sink2_channel,sink2_data,
                            sink2_startofpacket,sink2_endofpacket};
    assign sink3_payload = {sink3_channel,sink3_data,
                            sink3_startofpacket,sink3_endofpacket};
    assign sink4_payload = {sink4_channel,sink4_data,
                            sink4_startofpacket,sink4_endofpacket};
    assign sink5_payload = {sink5_channel,sink5_data,
                            sink5_startofpacket,sink5_endofpacket};

    assign {src_channel,src_data,src_startofpacket,src_endofpacket} = src_payload;
endmodule
|
408 |
|
|
|
409 |
|
|
|
410 |
|
|
|