OpenCores
URL https://opencores.org/ocsvn/openrisc_2011-10-31/openrisc_2011-10-31/trunk

Subversion Repositories openrisc_2011-10-31

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /openrisc/trunk/orpsocv2/boards/xilinx/ml501
    from Rev 478 to Rev 479

Rev 478 → Rev 479

/rtl/verilog/include/or1200_defines.v
210,7 → 210,7
// Disable bursts if they are not supported by the
// memory subsystem (only affects cache line fill)
//
`define OR1200_NO_BURSTS
//`define OR1200_NO_BURSTS
//
 
//
/rtl/verilog/xilinx_ddr2/xilinx_ddr2_if.v
1,15 → 1,14
//////////////////////////////////////////////////////////////////////
//// ////
//// Xilinx DDR2 controller Wishbone Interface ////
//// Xilinx ML501 DDR2 controller Wishbone Interface ////
//// ////
//// Description ////
//// Simple interface to the Xilinx MIG generated DDR2 controller////
//// ////
//// To Do: ////
//// Increase usage of cache BRAM to maximum (currently only ////
//// 256 bytes out of about 8192) ////
//// Make this a Wishbone B3 registered feedback burst friendly ////
//// server. ////
//// Use full capacity of BRAM ////
//// Employ LRU replacement scheme ////
//// Remove hard-coding of things relating to number of lines ////
//// ////
//// Author(s): ////
//// - Julius Baxter, julius.baxter@orsoc.se ////
42,38 → 41,34
//// ////
//////////////////////////////////////////////////////////////////////
/*
* The controller is designed to stream lots of data out at the DDR2 controller's
* rate. All we implement here is enough to do the simplest accesses into a
* small cache, which eases the domain crossing headaches.
* This is an interface to the Xilinx MIG-sourced DDR2 controller.
*
* This was originally written to handle a DDR2 part with a minimum burst
* length of 4 over a data bus which is 64-bits wide.
* The controller's interface is via FIFO buffers - one for address and control
* the other is for data. The data FIFO interface is 128-bits wide.
*
* This means the smallest access is 4*64 = 256 bits, or 32 bytes.
* This module has a cache with a different aspect ratio on each port. As we
* ultimately interface to a 32-bit wide Wishbone bus, one side is 32-bits
* and the other is 128-bits wide to accommodate the DDR2 controller's data
* path.
*
* We are bridging to a 32-bit wide system bus, so this means we must handle
* accesses in 8-word lots.
* At present, the cache controller doesn't employ associativity, so any
* line can be used for any location. A round-robin approach to line
* use is employed. TODO: an LRU scheme instead of round-robin.
*
* A simple cache mechanism has been implemented, meaning we check if the cached
* data has been written to, and therefore needs writing back to the main memory
* before any other access can occur.
* The cache is a macro generated by Xilinx's IP generation tool. This is
* because memories with dual-aspect ratios cannot be inferred via HDL.
*
* Cache memory:
* The cache memory is a core-generated module, instantiating something out
* of the XilinxCoreLib. This is because an arrangement of RAMB36s with
* differently sized A and B data in/out ports can't be instantiated directly.
* What we have is side A with 32-bits, and side B with 128-bits wide.
* The size of lines, as set by the defines, controls how long each read
* and write burst to/from the SDRAM is.
*
* TODO:
* This only supports 8-words for now but can easily be expanded, although
* multiple way/associativity caching will require some extra work to handle
* multiple cached addresses.
* There are two clock domains - the Wishbone and the DDR2 controller domain.
*
* But it should be easy enough to make this thing cache as much as its RAMB
* resources allow (4 RAMB16s, because of the 128-bit DDR2-side interface),
* which is about 8Kbyte.
* A signal is sent to control logic in the DDR2 domain side to load and store
* the contents of a particular line from and to the DDR2 controller's data
* FIFOs. This loading and storing is done at the DDR2 clock domain's rate.
*
* The writing of address and control data is done from the Wishbone domain.
*
* Multi-cycle paths:
* Write:
* To indicate that a writeback is occurring, a system-bus domain (wishbone, in
84,7 → 79,7
* "ddr2_write_done" is then sampled by the system-bus domain and "do_writeback"
* So there are paths between:
* ( register -> (sampled by) -> register )
* wb_clk:do_writeback -> ddr2_clk:do_writeback_ddr2_shifter
* wb_clk:do_writeback -> ddr2_clk:do_writeback_ddr2
* wb_clk:do_writeback -> ddr2_clk:ddr2_write_done
* ddr2_clk:ddr2_write_done -> wb_clk:do_writeback
*
99,8 → 94,8
* ddr2_clk:ddr2_read_done -> wb_clk:do_readfrom
* wb_clk:do_readfrom -> ddr2_clk:ddr2_read_done
*
*/
module xilinx_ddr2_if (
*/
module xilinx_ddr2_if2 (
input [31:0] wb_adr_i,
input wb_stb_i,
input wb_cyc_i,
111,7 → 106,7
input [31:0] wb_dat_i,
output [31:0] wb_dat_o,
output reg wb_ack_o,
 
output [12:0] ddr2_a,
output [1:0] ddr2_ba,
output ddr2_ras_n,
127,15 → 122,37
inout [7:0] ddr2_dqs_n,
output [1:0] ddr2_ck,
output [1:0] ddr2_ck_n,
 
input ddr2_if_clk,
input ddr2_if_rst,
input ddr2_if_rst,
input idly_clk_200,
input wb_clk,
input wb_rst);
`include "xilinx_ddr2_params.v"
 
// Define to add a counter, signaling error if the controller locks up
// (no ack after a certain period of time)
//`define ERR_COUNTER
/*
`define DDR2_CACHE_NUM_LINES 16
`define DDR2_CACHE_NUM_LINES_ENC_WIDTH 4 // log2(`DDR2_CACHE_NUM_LINES)
*/
`define DDR2_CACHE_NUM_LINES 4
`define DDR2_CACHE_NUM_LINES_ENC_WIDTH 2 // log2(`DDR2_CACHE_NUM_LINES)
 
`define DDR2_CACHE_NUM_WORDS_PER_LINE 256
`define DDR2_CACHE_ADDR_WIDTH_WORDS_PER_LINE 8
`define DDR2_CACHE_TAG_ADDR_WIDTH (32-`DDR2_CACHE_ADDR_WIDTH_WORDS_PER_LINE-2)
 
`define DDR2_CACHE_DDR2_SIDE_NUM_WORDS_PER_LINE (`DDR2_CACHE_NUM_WORDS_PER_LINE/4)
`define DDR2_CACHE_DDR2_SIDE_ADDR_WIDTH_NUM_WORDS_PER_LINE (`DDR2_CACHE_ADDR_WIDTH_WORDS_PER_LINE - 2)
`define DDR2_CACHE_DDR2_SIDE_ADDR_WIDTH (`DDR2_CACHE_NUM_LINES_ENC_WIDTH + `DDR2_CACHE_DDR2_SIDE_ADDR_WIDTH_NUM_WORDS_PER_LINE)
 
`define DDR2_CACHE_TAG_BITS 31:(`DDR2_CACHE_ADDR_WIDTH_WORDS_PER_LINE+2)
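// For reference, the values above work out as follows: a line is 256 x 32-bit
// words (1kB), so the in-line byte offset needs 8+2 = 10 address bits and the
// tag is wb_adr_i[31:10] (`DDR2_CACHE_TAG_ADDR_WIDTH = 32-8-2 = 22 bits). On
// the 128-bit DDR2 side a line is 256/4 = 64 words addressed with 8-2 = 6
// bits, and with the 2-bit line select the DDR2-side address is 2+6 = 8 bits.
// Four lines give 4kB of cache data in total.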
 
wire ddr2_clk; // DDR2 iface domain clock.
wire ddr2_rst; // reset from the ddr2 module
146,28 → 163,38
wire wb_req_new;
reg wb_req_new_r;
reg wb_req_addr_hit;
wire wb_req_addr_hit;
reg cached_addr_valid;
wire cached_addr_valid;
reg [31:6] cached_addr;
 
wire [31:(32 -`DDR2_CACHE_TAG_ADDR_WIDTH)] cached_addr;
`define DDR2_BURST_8_DQ64_ADDR_WIDTH 4 // = log2(burst of 8 64-bits = 16 words)
`define DDR2_BURST_4_DQ64_ADDR_WIDTH 3 // = log2(burst of 4 64-bits = 8 words)
// This counts how many addresses we should write to the fifo - the number
// of discrete FIFO transactions.
reg [`DDR2_CACHE_ADDR_WIDTH_WORDS_PER_LINE-`DDR2_BURST_8_DQ64_ADDR_WIDTH - 1:0] addr_counter;
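// For reference: with burst length 8 on the 64-bit DQ bus, each address FIFO
// entry covers 8 x 64 bits = 16 32-bit words, so one 256-word line takes
// 256/16 = 16 address FIFO writes and addr_counter is 8-4 = 4 bits wide.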
 
wire cache_write;
wire cache_hit;
reg cache_dirty;
reg [2:0] wb_req_cache_word_addr;
wire wb_cache_en;
reg do_writeback, do_writeback_r;
wire do_writeback_start, do_writeback_finished;
wire doing_writeback;
// Wire to indicate writing to data FIFO of MIG has completed
wire do_writeback_data_finished;
// Wire to indicate that the address FIFO of the MIG should be written to
// in order to initiate memory accesses.
reg do_writeback_addresses, do_writeback_addresses_r;
reg do_readfrom, do_readfrom_r;
wire do_readfrom_start, do_readfrom_finished;
wire doing_readfrom;
reg do_af_write;
// Domain crossing logic
reg wb_clk_r;
177,13 → 204,10
wire wb_clk_edge;
reg [2:0] ddr2_clk_phase;
// Sample when clk phase is 0
reg [7:0] do_writeback_ddr2_shifter;
reg [7:0] do_writeback_ddr2_shifter_r;
reg do_writeback_ddr2;
reg do_writeback_ddr2_fifo_we;
reg ddr2_write_done;
// Currently, the ddr2-side cache address is a single bit
reg [1:0] ddr2_cache_addr;
reg [`DDR2_CACHE_DDR2_SIDE_ADDR_WIDTH_NUM_WORDS_PER_LINE - 1:0] ddr2_cache_line_word_addr;
wire [127:0] ddr2_cache_data_o;
reg rd_data_valid_r;
reg ddr2_read_done;
193,8 → 217,11
wire app_wdf_afull;
wire app_wdf_wren;
wire app_af_wren;
wire [30:0] writeback_af_addr;
wire [30:0] readfrom_af_addr;
wire [30:0] app_af_addr;
wire [2:0] app_af_cmd;
wire [(APPDATA_WIDTH)-1:0] app_wdf_data;
wire [(APPDATA_WIDTH/8)-1:0] app_wdf_mask_data;
wire rd_data_valid;
201,8 → 228,87
wire [(APPDATA_WIDTH)-1:0] rd_data_fifo_out;
wire phy_init_done;
assign cache_hit = (cached_addr == wb_adr_i[31:6]) & cached_addr_valid;
wire [`DDR2_CACHE_NUM_LINES - 1 :0] cache_line_addr_validate;
wire [`DDR2_CACHE_NUM_LINES - 1 :0] cache_line_addr_invalidate;
wire [`DDR2_CACHE_NUM_LINES - 1 :0] cache_line_addr_valid;
wire [`DDR2_CACHE_NUM_LINES - 1 :0] cache_line_hit;
wire [`DDR2_CACHE_TAG_BITS] cache_line_addr [0:`DDR2_CACHE_NUM_LINES-1] ;
 
// Cache control signals
// Wishbone side
wire [`DDR2_CACHE_ADDR_WIDTH_WORDS_PER_LINE-1:0] wb_cache_adr;
wire [3:0] wb_cache_sel_we;
// DDR side
wire ddr2_cache_en;
wire [15:0] ddr2_cache_we;
 
reg wb_bursting; // Indicate if burst is enabled
reg [3:0] wb_burst_addr; // Burst counter, up to 16
wire [1:0] wb_burst_addr_4beat;
wire [2:0] wb_burst_addr_8beat;
wire wb_burst_addr_incr;
wire ack_err;
reg ack_err_r;
 
// Decoded select line
wire [`DDR2_CACHE_NUM_LINES-1:0] selected_cache_line;
wire [`DDR2_CACHE_NUM_LINES_ENC_WIDTH-1:0] selected_cache_line_enc;
reg [`DDR2_CACHE_NUM_LINES_ENC_WIDTH-1:0] selected_cache_line_enc_ddr2_clk;
genvar i;
generate
for (i=0;i<`DDR2_CACHE_NUM_LINES;i=i+1) begin : cache_addr
xilinx_ddr2_wb_if_cache_adr_reg cache_addr_reg_inst
( .adr_i(wb_adr_i[`DDR2_CACHE_TAG_BITS]),
.validate(cache_line_addr_validate[i]),
.invalidate(cache_line_addr_invalidate[i]),
.cache_hit(cache_line_hit[i]),
.adr_valid(cache_line_addr_valid[i]),
.cached_adr_o(cache_line_addr[i]),
.clk(wb_clk),
.rst(wb_rst));
end
endgenerate
 
wire start_writeback, start_fill;
xilinx_ddr2_wb_if_cache_control xilinx_ddr2_wb_if_cache_control0
(
// Outputs
.start_writeback (start_writeback),
.start_fill (start_fill),
.cache_line_validate (cache_line_addr_validate),
.cache_line_invalidate (cache_line_addr_invalidate),
.selected_cache_line (selected_cache_line),
.selected_cache_line_enc (selected_cache_line_enc),
// Inputs
.cache_line_addr_valid (cache_line_addr_valid),
.cache_line_addr_hit (cache_line_hit),
.wb_req (wb_req),
.cache_write (cache_write),
.writeback_done (do_writeback_finished),
.fill_done (do_readfrom_finished),
.wb_clk (wb_clk),
.wb_rst (wb_rst));
 
defparam xilinx_ddr2_wb_if_cache_control0.num_lines = `DDR2_CACHE_NUM_LINES;
defparam xilinx_ddr2_wb_if_cache_control0.num_lines_log2 = `DDR2_CACHE_NUM_LINES_ENC_WIDTH;
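// Note: the defparams above override the cache controller's default
// parameters (16 lines) so they match the 4-line configuration selected by
// the `DDR2_CACHE_NUM_LINES defines.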
 
assign cached_addr = selected_cache_line[0] ? cache_line_addr[0] :
selected_cache_line[1] ? cache_line_addr[1] :
selected_cache_line[2] ? cache_line_addr[2] :
selected_cache_line[3] ? cache_line_addr[3] : 0;
assign cache_write = wb_req & wb_we_i & wb_ack_o;
assign cache_hit = |(selected_cache_line & cache_line_hit);
assign cached_addr_valid = |(selected_cache_line & cache_line_addr_valid);
assign wb_req_addr_hit = (wb_req & cache_hit & cached_addr_valid);
// Wishbone request detection
assign wb_req = wb_stb_i & wb_cyc_i & phy_init_done;
210,63 → 316,118
wb_req_r <= wb_req;
assign wb_req_new = wb_req & !wb_req_r;
 
always @(posedge wb_clk)
wb_req_new_r <= wb_req_new;
 
always @(posedge wb_clk)
if (wb_rst)
wb_bursting <= 0;
// Reset if acking end of transfer
else if (wb_ack_o && wb_cti_i == 3'b111)
wb_bursting <= 0;
// Set if beginning a new transaction and an incrementing burst is indicated
// TODO - double check if this burst is going to go over a cache line
// boundary - if so don't allow burst, fall back to classic cycles.
else if (wb_req_new)
wb_bursting <= (wb_cti_i == 3'b010);
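// Wishbone B3 cycle/burst type encodings relied on here:
// wb_cti_i 3'b010 = incrementing burst cycle, 3'b111 = end of burst;
// wb_bte_i 2'b01/2'b10/2'b11 = 4-, 8- and 16-beat wrapped bursts.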
 
// Help constrain additions to appropriate bit-width for wrapping
assign wb_burst_addr_4beat = wb_adr_i[3:2] + 1;
assign wb_burst_addr_8beat = wb_adr_i[4:2] + 1;
// Register whether it's a hit or not
// As more lines are added, add them to this check.
// Increment burst address whenever we get a hit when reading, or
// when acking and writing.
assign wb_burst_addr_incr = (wb_req_addr_hit & (!wb_we_i |
(wb_we_i & wb_ack_o)));
// Calculate burst address depending on burst type indicator
always @(posedge wb_clk)
if (wb_rst)
wb_req_addr_hit <= 0;
else
wb_req_addr_hit <= wb_req & cache_hit & cached_addr_valid;
wb_burst_addr <= 0;
else if (wb_req_new)
// When we have a bursting read to an address which is in cache then
// initialise the address to the next word in the burst sequence.
// If it's a miss, or it's a write, then we just take what's on the
// bus.
wb_burst_addr <= !(wb_req_addr_hit & !wb_we_i) ? wb_adr_i[5:2] :
wb_bte_i==2'b01 ? {wb_adr_i[5:4], wb_burst_addr_4beat }:
wb_bte_i==2'b10 ? {wb_adr_i[5], wb_burst_addr_8beat }:
wb_bte_i==2'b11 ? wb_adr_i[5:2] + 1 :
wb_adr_i[5:2];
else if (wb_burst_addr_incr & wb_bte_i==2'b01)
wb_burst_addr[1:0] <= wb_burst_addr[1:0] + 1;
else if (wb_burst_addr_incr & wb_bte_i==2'b10)
wb_burst_addr[2:0] <= wb_burst_addr[2:0] + 1;
else if (wb_burst_addr_incr & wb_bte_i==2'b11)
wb_burst_addr[3:0] <= wb_burst_addr[3:0] + 1;
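// Worked example of the wrapping behaviour: a 4-beat wrapped burst
// (wb_bte_i==2'b01) starting at word address xx10 accesses words 2,3,0,1 of
// the 4-word group - only wb_burst_addr[1:0] increments and wraps while the
// upper bits stay fixed; 8- and 16-beat wraps do the same over [2:0] and [3:0].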
`ifdef ERR_COUNTER
reg [26:0] ack_err_cntr;
always @(posedge wb_clk)
if (wb_rst)
ack_err_cntr <= 0;
else if (!wb_req)
ack_err_cntr <= 0;
else if (|ack_err_cntr)
ack_err_cntr <= ack_err_cntr + 1;
else if (wb_req_new & !(|ack_err_cntr))
ack_err_cntr <= 1;
 
assign ack_err = (&ack_err_cntr);
always @(posedge wb_clk)
ack_err_r <= ack_err;
 
assign wb_err_o = ack_err_r;
`else // !`ifdef ERR_COUNTER
assign ack_err = 0;
always @(posedge wb_clk)
ack_err_r <= 0;
 
assign wb_err_o = 0;
`endif
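// Note: with ERR_COUNTER defined, the 27-bit counter is seeded to 1 on a new
// request, counts while the request stays asserted and clears when it goes
// away, so wb_err_o is only asserted if a single access sits unanswered for
// roughly 2^27 wb_clk cycles.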
always @(posedge wb_clk)
if (wb_rst)
wb_ack_o <= 0;
else
wb_ack_o <= wb_req_addr_hit & !wb_ack_o & !wb_ack_o_r;
wb_ack_o <= wb_req_addr_hit &
(
// Simple acks on classic cycles
(!wb_bursting && !wb_ack_o && !wb_ack_o_r)
// De-assert ack when we see the final transaction
|| (wb_bursting && !(wb_cti_i==3'b111))
);
always @(posedge wb_clk)
wb_ack_o_r <= wb_ack_o;
// Address valid logic
// Writeback/readfrom lower address generation
always @(posedge wb_clk)
if (wb_rst)
cached_addr_valid <= 0;
else if (do_readfrom_finished)
cached_addr_valid <= 1;
else if ( do_writeback_finished ) // Data written back, cache not valid
cached_addr_valid <= 0;
else if (wb_req & !cache_hit & cached_addr_valid & !cache_dirty)
// Invalidate cache so a readfrom begins
cached_addr_valid <= 0;
addr_counter <= 0;
else if (app_af_wren)
addr_counter <= addr_counter+1;
// Address caching
// Determine if we're writing access requests into DDR2 interface AF
always @(posedge wb_clk)
if (wb_rst)
cached_addr <= 0;
else if (do_readfrom_start)
cached_addr <= wb_adr_i[31:6];
do_af_write <= 0;
else if (do_readfrom_start | do_writeback_data_finished)
do_af_write <= 1;
else if ((&addr_counter)) // Stop when counter rolls over
do_af_write <= 0;
// Cache dirty signal
always @(posedge wb_clk)
if (wb_rst)
cache_dirty <= 0;
else if (wb_req & wb_we_i & wb_req_addr_hit & wb_ack_o)
cache_dirty <= 1;
else if (!cached_addr_valid & cache_dirty)
cache_dirty <= 0;
 
// Wishbone side of cache enable. Important!
// 1. Enable on first access, if it's not a write
// 2. Enable if we've just refreshed the cache
// 3. Enable on ACK'ing for a write
assign wb_cache_en = (wb_req_new & !wb_we_i) | do_readfrom_finished |
(wb_req_addr_hit & wb_stb_i & !wb_we_i & !wb_ack_o) |
(wb_ack_o & wb_we_i);
// Wishbone side of cache enable. Always enabled unless doing DDR2-side
// things (fill or writeback).
assign wb_cache_en = !(do_readfrom | do_writeback);
// Writeback detect logic
always @(posedge wb_clk)
if (wb_rst)
273,17 → 434,32
do_writeback <= 0;
else if (ddr2_write_done) // DDR2 domain signal
do_writeback <= 0;
else if (wb_req & !cache_hit & cached_addr_valid & !doing_writeback & cache_dirty)
else if (start_writeback)
do_writeback <= 1;
 
always @(posedge wb_clk)
do_writeback_r <= do_writeback;
 
// Detect falling edge of do_writeback
assign do_writeback_data_finished = !do_writeback & do_writeback_r;
always @(posedge wb_clk)
if (wb_rst)
do_writeback_addresses <= 0;
else if (do_writeback_data_finished)
do_writeback_addresses <= 1;
else if ((&addr_counter))
do_writeback_addresses <= 0;
 
always @(posedge wb_clk)
do_writeback_addresses_r <= do_writeback_addresses;
 
// Detect rising edge of do_writeback
assign do_writeback_start = do_writeback & !do_writeback_r;
assign do_writeback_finished = !do_writeback & do_writeback_r;
assign doing_writeback = do_writeback | do_writeback_r;
// Detect falling edge of address writing control signal
assign do_writeback_finished = !do_writeback_addresses &
do_writeback_addresses_r;
// DDR2 Read detect logic
always @(posedge wb_clk)
if (wb_rst)
290,28 → 466,36
do_readfrom <= 0;
else if (ddr2_read_done) // DDR2 domain signal
do_readfrom <= 0;
else if (wb_req & !cache_hit & !cached_addr_valid & !doing_readfrom & !cache_dirty)
else if (start_fill)
do_readfrom <= 1;
 
always @(posedge wb_clk)
do_readfrom_r <= do_readfrom;
 
// Detect line fill request rising edge
assign do_readfrom_start = do_readfrom & !do_readfrom_r;
// Detect line fill request falling edge
assign do_readfrom_finished = !do_readfrom & do_readfrom_r;
assign doing_readfrom = do_readfrom | do_readfrom_r;
 
// Address fifo signals
assign app_af_wren = (do_writeback_finished | do_readfrom_start);
assign app_af_cmd[0] = do_readfrom_start; // 1 - read, 0 - write
assign app_af_wren = (do_readfrom_r | do_writeback_addresses_r) &
!app_af_afull & do_af_write ;
assign app_af_cmd[0] = do_readfrom; // 1 - read, 0 - write
assign app_af_cmd[2:1] = 0;
assign app_af_addr = do_readfrom_start ? {2'd0, wb_adr_i[31:6],3'd0} :
{2'd0,cached_addr,3'd0};
 
assign writeback_af_addr = {1'd0, cached_addr, addr_counter, 3'd0};
assign readfrom_af_addr = {1'd0, wb_adr_i[`DDR2_CACHE_TAG_BITS],
addr_counter, 3'd0};
assign app_af_addr = doing_readfrom ? readfrom_af_addr : writeback_af_addr;
assign app_wdf_wren = do_writeback_ddr2_fifo_we;
assign app_wdf_data = ddr2_cache_data_o;
assign app_wdf_mask_data = 0;
always @(posedge wb_clk) if (wb_rst) wb_clk_r <= 0; else wb_clk_r <= ~wb_clk_r;
always @(posedge wb_clk)
if (wb_rst) wb_clk_r <= 0; else wb_clk_r <= ~wb_clk_r;
always @(posedge ddr2_clk) wb_clk_in_ddr2_clk <= wb_clk_r;
always @(posedge ddr2_clk) wb_clk_in_ddr2_clk_r <= wb_clk_in_ddr2_clk;
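// Note on the clock-domain crossing: wb_clk_r toggles once per wb_clk cycle,
// so double-registering it into the ddr2_clk domain gives a metastability-
// safe view of the Wishbone clock's phase; wb_clk_edge and the ddr2_clk_phase
// counter are presumably derived from this so that wb_clk-domain control
// signals (do_writeback, do_readfrom) are only sampled at the "sample WB
// domain" points below.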
324,43 → 508,36
ddr2_clk_phase <= 0;
else
ddr2_clk_phase <= ddr2_clk_phase + 1;
always @(posedge ddr2_clk)
do_writeback_ddr2_fifo_we <= (do_writeback_ddr2_shifter_r[0]) |
(do_writeback_ddr2_shifter_r[2]) |
(do_writeback_ddr2_shifter_r[4]) |
(do_writeback_ddr2_shifter_r[6]);
 
// Kick off counting when we see that the wb_clk domain is
// doing a writeback.
always @(posedge ddr2_clk)
if (ddr2_rst)
do_writeback_ddr2_shifter <= 4'h0;
else if (|do_writeback_ddr2_shifter)
do_writeback_ddr2_shifter <= {do_writeback_ddr2_shifter[6:0], 1'b0};
else if (!(|ddr2_clk_phase) & do_writeback & !ddr2_write_done) // sample WB domain
do_writeback_ddr2_shifter <= 1;
 
 
do_writeback_ddr2 <= 0;
else if (&ddr2_cache_line_word_addr)
do_writeback_ddr2 <= 0;
else if (!(|ddr2_clk_phase) & do_writeback & // sample WB domain
!ddr2_write_done)
do_writeback_ddr2 <= 1;
always @(posedge ddr2_clk)
do_writeback_ddr2_shifter_r <= do_writeback_ddr2_shifter;
if (ddr2_rst)
ddr2_cache_line_word_addr <= 0;
else if (rd_data_valid | (do_writeback_ddr2 & !app_wdf_afull))
ddr2_cache_line_word_addr <= ddr2_cache_line_word_addr + 1;
else if (ddr2_write_done | ddr2_read_done)
ddr2_cache_line_word_addr <= 0;
 
always @(posedge ddr2_clk)
do_writeback_ddr2_fifo_we <= (do_writeback_ddr2 & !app_wdf_afull);
always @(posedge ddr2_clk)
if (ddr2_rst)
ddr2_write_done <= 0;
else if (do_writeback_ddr2_shifter[7])
else if ((&ddr2_cache_line_word_addr))
ddr2_write_done <= 1;
else if ((!(|ddr2_clk_phase)) & !do_writeback) // sample WB domain
ddr2_write_done <= 0;
always @(posedge ddr2_clk)
if (ddr2_rst)
ddr2_cache_addr <= 0;
else if (rd_data_valid | do_writeback_ddr2_fifo_we)
ddr2_cache_addr <= ddr2_cache_addr + 1;
always @(posedge ddr2_clk)
rd_data_valid_r <= rd_data_valid;
// Read done signaling to WB domain
367,23 → 544,27
always @(posedge ddr2_clk)
if (ddr2_rst)
ddr2_read_done <= 0;
// Detect read data valid falling edge
else if (!rd_data_valid & rd_data_valid_r)
else if (rd_data_valid_r & (&ddr2_cache_line_word_addr))
ddr2_read_done <= 1;
else if (!(|ddr2_clk_phase) & !do_readfrom) // Read WB domain
ddr2_read_done <= 0;
 
wire [3:0] wb_cache_adr;
assign wb_cache_adr = wb_adr_i[5:2];
wire [3:0] wb_cache_sel_we;
assign wb_cache_sel_we = {4{wb_we_i}} & wb_sel_i;
wire ddr2_cache_en;
wire [15:0] ddr2_cache_we;
assign ddr2_cache_en = rd_data_valid | (|do_writeback_ddr2_shifter);
// Lower word address uses potentially bursting address counter
assign wb_cache_adr = wb_bursting ?
{wb_adr_i[(`DDR2_CACHE_ADDR_WIDTH_WORDS_PER_LINE+2)-1:6],wb_burst_addr}:
wb_adr_i[(`DDR2_CACHE_ADDR_WIDTH_WORDS_PER_LINE+2)-1:2];
assign wb_cache_sel_we = {4{wb_we_i & wb_ack_o}} & wb_sel_i;
assign ddr2_cache_en = (rd_data_valid |do_writeback_ddr2);
assign ddr2_cache_we = {16{rd_data_valid}};
 
always @(posedge ddr2_clk)
if (!(|ddr2_clk_phase)) // Read WB domain
selected_cache_line_enc_ddr2_clk <= selected_cache_line_enc;
// Xilinx Coregen true dual-port RAMB array.
// Xilinx Coregen true dual-port RAMB
// Wishbone side : 32-bit
// DDR2 side : 128-bit
xilinx_ddr2_if_cache cache_mem0
392,7 → 573,7
.clka(wb_clk),
.ena(wb_cache_en),
.wea(wb_cache_sel_we),
.addra({8'd0,wb_cache_adr}),
.addra({2'd0, selected_cache_line_enc,wb_cache_adr}),
.dina(wb_dat_i),
.douta(wb_dat_o),
 
400,91 → 581,312
.clkb(ddr2_clk),
.enb(ddr2_cache_en),
.web(ddr2_cache_we),
.addrb({8'd0,ddr2_cache_addr}),
.addrb({2'd0, selected_cache_line_enc_ddr2_clk,
ddr2_cache_line_word_addr}),
.dinb(rd_data_fifo_out),
.doutb(ddr2_cache_data_o));
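// For reference: port A of the generated RAM sees the cache as 4 lines x 256
// x 32-bit words (a 10-bit word address, zero-padded to the port width),
// while port B sees the same 4kB as 4 lines x 64 x 128-bit words (an 8-bit
// address) - the dual aspect ratio that, per the header comment, cannot be
// inferred directly in HDL.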
ddr2_mig #
(
.BANK_WIDTH (BANK_WIDTH),
.CKE_WIDTH (CKE_WIDTH),
.CLK_WIDTH (CLK_WIDTH),
.COL_WIDTH (COL_WIDTH),
.CS_NUM (CS_NUM),
.CS_WIDTH (CS_WIDTH),
.CS_BITS (CS_BITS),
.DM_WIDTH (DM_WIDTH),
.DQ_WIDTH (DQ_WIDTH),
.DQ_PER_DQS (DQ_PER_DQS),
.DQ_BITS (DQ_BITS),
.DQS_WIDTH (DQS_WIDTH),
.DQS_BITS (DQS_BITS),
.HIGH_PERFORMANCE_MODE (HIGH_PERFORMANCE_MODE),
.ODT_WIDTH (ODT_WIDTH),
.ROW_WIDTH (ROW_WIDTH),
.APPDATA_WIDTH (APPDATA_WIDTH),
.ADDITIVE_LAT (ADDITIVE_LAT),
.BURST_LEN (BURST_LEN),
.BURST_TYPE (BURST_TYPE),
.CAS_LAT (CAS_LAT),
.ECC_ENABLE (ECC_ENABLE),
.MULTI_BANK_EN (MULTI_BANK_EN),
.ODT_TYPE (ODT_TYPE),
.REDUCE_DRV (REDUCE_DRV),
.REG_ENABLE (REG_ENABLE),
.TREFI_NS (TREFI_NS),
.TRAS (TRAS),
.TRCD (TRCD),
.TRFC (TRFC),
.TRP (TRP),
.TRTP (TRTP),
.TWR (TWR),
.TWTR (TWTR),
.SIM_ONLY (SIM_ONLY),
.RST_ACT_LOW (RST_ACT_LOW),
.CLK_TYPE (CLK_TYPE),
.DLL_FREQ_MODE (DLL_FREQ_MODE),
.CLK_PERIOD (CLK_PERIOD)
)
.BANK_WIDTH (BANK_WIDTH),
.CKE_WIDTH (CKE_WIDTH),
.CLK_WIDTH (CLK_WIDTH),
.COL_WIDTH (COL_WIDTH),
.CS_NUM (CS_NUM),
.CS_WIDTH (CS_WIDTH),
.CS_BITS (CS_BITS),
.DM_WIDTH (DM_WIDTH),
.DQ_WIDTH (DQ_WIDTH),
.DQ_PER_DQS (DQ_PER_DQS),
.DQ_BITS (DQ_BITS),
.DQS_WIDTH (DQS_WIDTH),
.DQS_BITS (DQS_BITS),
.HIGH_PERFORMANCE_MODE (HIGH_PERFORMANCE_MODE),
.ODT_WIDTH (ODT_WIDTH),
.ROW_WIDTH (ROW_WIDTH),
.APPDATA_WIDTH (APPDATA_WIDTH),
.ADDITIVE_LAT (ADDITIVE_LAT),
.BURST_LEN (BURST_LEN),
.BURST_TYPE (BURST_TYPE),
.CAS_LAT (CAS_LAT),
.ECC_ENABLE (ECC_ENABLE),
.MULTI_BANK_EN (MULTI_BANK_EN),
.ODT_TYPE (ODT_TYPE),
.REDUCE_DRV (REDUCE_DRV),
.REG_ENABLE (REG_ENABLE),
.TREFI_NS (TREFI_NS),
.TRAS (TRAS),
.TRCD (TRCD),
.TRFC (TRFC),
.TRP (TRP),
.TRTP (TRTP),
.TWR (TWR),
.TWTR (TWTR),
.SIM_ONLY (SIM_ONLY),
.RST_ACT_LOW (RST_ACT_LOW),
.CLK_TYPE (CLK_TYPE),
.DLL_FREQ_MODE (DLL_FREQ_MODE),
.CLK_PERIOD (CLK_PERIOD)
)
ddr2_mig0
(
.sys_clk (ddr2_if_clk),
.idly_clk_200 (idly_clk_200),
.sys_rst_n (ddr2_if_rst), // Act. high, sync. to ddr2_if_clk
.ddr2_ras_n (ddr2_ras_n),
.ddr2_cas_n (ddr2_cas_n),
.ddr2_we_n (ddr2_we_n),
.ddr2_cs_n (ddr2_cs_n),
.ddr2_cke (ddr2_cke),
.ddr2_odt (ddr2_odt),
.ddr2_dm (ddr2_dm),
.ddr2_dq (ddr2_dq),
.ddr2_dqs (ddr2_dqs),
.ddr2_dqs_n (ddr2_dqs_n),
.ddr2_ck (ddr2_ck),
.ddr2_ck_n (ddr2_ck_n),
.ddr2_ba (ddr2_ba),
.ddr2_a (ddr2_a),
.sys_clk (ddr2_if_clk),
.idly_clk_200 (idly_clk_200),
.sys_rst_n (ddr2_if_rst),
.ddr2_ras_n (ddr2_ras_n),
.ddr2_cas_n (ddr2_cas_n),
.ddr2_we_n (ddr2_we_n),
.ddr2_cs_n (ddr2_cs_n),
.ddr2_cke (ddr2_cke),
.ddr2_odt (ddr2_odt),
.ddr2_dm (ddr2_dm),
.ddr2_dq (ddr2_dq),
.ddr2_dqs (ddr2_dqs),
.ddr2_dqs_n (ddr2_dqs_n),
.ddr2_ck (ddr2_ck),
.ddr2_ck_n (ddr2_ck_n),
.ddr2_ba (ddr2_ba),
.ddr2_a (ddr2_a),
.clk0_tb (ddr2_clk),
.rst0_tb (ddr2_rst),
.usr_clk (wb_clk),
.app_af_afull (app_af_afull),
.app_wdf_afull (app_wdf_afull),
.rd_data_valid (rd_data_valid),
.rd_data_fifo_out (rd_data_fifo_out),
.app_af_wren (app_af_wren),
.app_af_cmd (app_af_cmd),
.app_af_addr (app_af_addr),
.app_wdf_wren (app_wdf_wren),
.app_wdf_data (app_wdf_data),
.app_wdf_mask_data (app_wdf_mask_data),
.phy_init_done (phy_init_done)
.usr_clk (wb_clk),
.app_af_afull (app_af_afull),
.app_wdf_afull (app_wdf_afull),
.rd_data_valid (rd_data_valid),
.rd_data_fifo_out (rd_data_fifo_out),
.app_af_wren (app_af_wren),
.app_af_cmd (app_af_cmd),
.app_af_addr (app_af_addr),
.app_wdf_wren (app_wdf_wren),
.app_wdf_data (app_wdf_data),
.app_wdf_mask_data (app_wdf_mask_data),
.phy_init_done (phy_init_done)
);
 
endmodule // ml501_ddr2_if
endmodule // xilinx_ddr2_if2
 
// Local Variables:
// verilog-library-directories:("." "ddr2_mig")
// verilog-library-extensions:(".v" ".h")
// End:
 
 
module xilinx_ddr2_wb_if_cache_adr_reg
(adr_i, validate, invalidate,
cached_adr_o, cache_hit, adr_valid,
clk, rst);
 
parameter full_adr_width = 32;
parameter word_adr_width = 2; // 4 bytes per word
parameter line_adr_width = 8; // 256 words per "line"
 
parameter tag_width = full_adr_width - line_adr_width - word_adr_width;
input [full_adr_width-1: word_adr_width + line_adr_width] adr_i;
input validate;
input invalidate;
output [full_adr_width-1: word_adr_width + line_adr_width] cached_adr_o;
output cache_hit;
output reg adr_valid;
input clk, rst;
reg [tag_width-1:0] cached_adr;
 
assign cached_adr_o = cached_adr;
 
always @(posedge clk)
if (rst)
cached_adr <= 0;
else if (validate)
cached_adr <= adr_i;
always @(posedge clk)
if (rst)
adr_valid <= 0;
else if (validate)
adr_valid <= 1;
else if (invalidate)
adr_valid <= 0;
assign cache_hit = (adr_i == cached_adr);
 
endmodule // xilinx_ddr2_wb_if_cache_adr_reg
 
module xilinx_ddr2_wb_if_cache_control
( cache_line_addr_valid, cache_line_addr_hit,
wb_req,
cache_write,
writeback_done, fill_done,
start_writeback, start_fill,
cache_line_validate, cache_line_invalidate,
selected_cache_line, selected_cache_line_enc,
wb_clk, wb_rst);
 
parameter num_lines = 16;
parameter num_lines_log2 = 4;
input [num_lines-1:0] cache_line_addr_valid;
input [num_lines-1:0] cache_line_addr_hit;
input wb_req;
input cache_write;
input writeback_done, fill_done;
output reg start_writeback;
output reg start_fill;
output reg [num_lines-1:0] cache_line_validate;
output reg [num_lines-1:0] cache_line_invalidate;
output [num_lines-1:0] selected_cache_line;
output reg [num_lines_log2-1:0] selected_cache_line_enc;
input wb_clk, wb_rst;
reg [num_lines-1:0] dirty;
reg [num_lines-1:0] selected_cache_line_from_miss;
 
reg selected_cache_line_new;
reg invalidate_clean_line;
reg [num_lines-1:0] selected_cache_line_r;
reg [num_lines-1:0] selected_cache_line_r2;
 
reg wb_req_r;
 
wire wb_req_new;
reg wb_req_new_r;
 
always @(posedge wb_clk)
wb_req_r <= wb_req;
assign wb_req_new = wb_req & !wb_req_r;
always @(posedge wb_clk)
wb_req_new_r <= wb_req_new;
 
// Select a cache line when we miss. Currently this is simply round-robin
always @(posedge wb_clk)
if (wb_rst)
selected_cache_line_from_miss <= 1;
else if (wb_req_new_r & !(|selected_cache_line_r)) // miss,no line selected
// Shift select bit one
selected_cache_line_from_miss
<= {selected_cache_line_from_miss[num_lines-2:0],
selected_cache_line_from_miss[num_lines-1]};
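// e.g. with four lines the one-hot victim select rotates
// 0001 -> 0010 -> 0100 -> 1000 -> 0001 on successive misses, giving the
// round-robin replacement mentioned above.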
// Line selection logic: when a line address is valid and hits, we select it
always @(posedge wb_clk)
if (wb_rst)
selected_cache_line_r <= 0;
else if (wb_req_new)
selected_cache_line_r <= cache_line_addr_valid & cache_line_addr_hit;
else if (wb_req_new_r & !(|selected_cache_line_r))
selected_cache_line_r <= selected_cache_line_from_miss;
 
always @(posedge wb_clk)
selected_cache_line_r2 <= selected_cache_line_r;
 
assign selected_cache_line = selected_cache_line_r2;
 
// A new line of cache has been selected
always @(posedge wb_clk)
if (wb_rst)
selected_cache_line_new <= 0;
else if (wb_req_new & (&(cache_line_addr_valid & cache_line_addr_hit)))
// New line address selected
selected_cache_line_new <= 1;
else if ((!selected_cache_line_new) & wb_req_new_r)
// Didn't select one last time, so we must have forced ourselves to
// select a new one
selected_cache_line_new <= 1;
else if (selected_cache_line_new)
selected_cache_line_new <= 0;
 
always @(posedge wb_clk)
if (wb_rst)
dirty <= 0;
else if (cache_write)
dirty <= dirty | selected_cache_line_r;
else if (writeback_done)
dirty <= dirty & ~(selected_cache_line_r);
// Validate the cache line address in the register when line filled
always @(posedge wb_clk)
if (wb_rst)
cache_line_validate <= 0;
else if (fill_done)
cache_line_validate <= selected_cache_line_r;
else if (|cache_line_validate)
cache_line_validate <= 0;
// Invalidate the cache line address in the register when line written back
always @(posedge wb_clk)
if (wb_rst)
cache_line_invalidate <= 0;
else if (writeback_done | invalidate_clean_line)
cache_line_invalidate <= selected_cache_line_r;
else if (|cache_line_invalidate)
cache_line_invalidate <= 0;
 
// Initiate-writeback logic
always @(posedge wb_clk)
if (wb_rst)
start_writeback <= 0;
else if (selected_cache_line_new & (|(dirty & selected_cache_line_r)) &
(|(selected_cache_line_r & cache_line_addr_valid)) &
!(|(cache_line_addr_hit & selected_cache_line_r)))
start_writeback <= 1;
else if (start_writeback)
start_writeback <= 0;
 
// Invalidate lines which we haven't written to so we can fill them
always @(posedge wb_clk)
if (wb_rst)
invalidate_clean_line <= 0;
else if (invalidate_clean_line)
invalidate_clean_line <= 0;
else if ((selected_cache_line_new) & // New line selected
!(|(dirty & selected_cache_line_r)) & // It's not dirty
// It's valid, but we've selected it so we're trashing it
(|(selected_cache_line_r & cache_line_addr_valid)) &
!(|(cache_line_addr_hit & selected_cache_line_r))) // Not a hit
invalidate_clean_line <= 1;
 
reg invalidate_clean_line_r;
always @(posedge wb_clk)
invalidate_clean_line_r <= invalidate_clean_line;
// Initiate-fill logic
always @(posedge wb_clk)
if (wb_rst)
start_fill <= 0;
else if (((selected_cache_line_new) & // New line selected
// not valid
!(|(cache_line_addr_valid & selected_cache_line_r))) |
writeback_done | invalidate_clean_line_r
)
start_fill <= 1;
else if (start_fill)
start_fill <= 0;
 
// Relies on there only being 4 lines
always @(posedge wb_clk)
if (selected_cache_line_r[0])
selected_cache_line_enc <= 0;
else if (selected_cache_line_r[1])
selected_cache_line_enc <= 1;
else if (selected_cache_line_r[2])
selected_cache_line_enc <= 2;
else if (selected_cache_line_r[3])
selected_cache_line_enc <= 3;
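// A loop-based encoder would remove the four-line assumption; an untested
// sketch (the select is one-hot, so at most one branch assigns):
//
// integer j;
// always @(posedge wb_clk)
//   for (j=0; j<num_lines; j=j+1)
//     if (selected_cache_line_r[j])
//       selected_cache_line_enc <= j;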
 
 
endmodule // xilinx_ddr2_wb_if_cache_control
/backend/par/bin/ml501.ucf
707,9 → 707,9
## DDR2 clock domain nets
NET "*/xilinx_ddr2_if0/ddr2_read_done" TNM_NET = "DDR2_READ_DONE_GRP";
NET "*/xilinx_ddr2_if0/ddr2_write_done" TNM_NET = "DDR2_WRITE_DONE_GRP";
NET "*/xilinx_ddr2_if0/do_writeback_ddr2_shifter*" TNM_NET = "DDR2_WRITEBACK_SHIFTER";
 
TIMEGRP "DDR2_MC_REGS" = "DDR2_READ_DONE_GRP" "DDR2_WRITE_DONE_GRP" "DDR2_WRITEBACK_SHIFTER";
 
TIMEGRP "DDR2_MC_REGS" = "DDR2_READ_DONE_GRP" "DDR2_WRITE_DONE_GRP";
## System bus (wishbone) domain nets
NET "*/xilinx_ddr2_if0/do_writeback*" TNM_NET = "WB_DO_WRITEBACK";
NET "*/xilinx_ddr2_if0/do_readfrom*" TNM_NET = "WB_DO_READFROM";
/backend/par/bin/Makefile
187,7 → 187,7
 
 
clean:
$(Q)rm -rf *.*
$(Q)rm -rf *.* xlnx_auto*
 
clean-syn:
$(Q)$(MAKE) -C $(SYN_RUN_DIR) distclean
/sim/bin/Makefile
162,6 → 162,7
BOOTROM_SRC=$(shell ls $(BOARD_BOOTROM_SW_DIR)/* | grep -v $(BOOTROM_FILE))
BOOTROM_VERILOG=$(BOARD_BOOTROM_SW_DIR)/$(BOOTROM_FILE)
 
.PHONY: $(BOOTROM_VERILOG)
bootrom: $(BOOTROM_VERILOG)
 
$(BOOTROM_VERILOG): $(BOOTROM_SRC)
/sw/board/include/board.h
9,11 → 9,14
// Uncomment the appropriate bootloader define. This will affect the bootrom.S
// file, which is compiled and converted into Verilog for inclusion at
// synthesis time. See bootloader/bootloader.S for details on each option.
 
#ifndef PRELOAD_RAM
#define BOOTROM_SPI_FLASH
//#define BOOTROM_GOTO_RESET
//#define BOOTROM_LOOP_AT_ZERO
//#define BOOTROM_LOOP_IN_ROM
#else
#define BOOTROM_GOTO_RESET
#endif
 
// Address bootloader should start from in FLASH
// Last 256KB of 2MB flash - offset 0x1c0000 (2MB-256KB)
