Line 20... |
Line 20... |
----------------------------------------------------------------------------------
|
----------------------------------------------------------------------------------
|
library IEEE;
|
library IEEE;
|
use IEEE.STD_LOGIC_1164.ALL;
|
use IEEE.STD_LOGIC_1164.ALL;
|
use ieee.numeric_std.all;
|
use ieee.numeric_std.all;
|
|
|
|
library work;
|
|
use work.wb_init.all; -- initialization package, comment out when not used
|
|
|
-- Deprecated XPS library:
|
-- Deprecated XPS library:
|
--library proc_common_v3_00_a;
|
--library proc_common_v3_00_a;
|
--use proc_common_v3_00_a.proc_common_pkg.all; -- Only for simulation ( pad_power2() )
|
--use proc_common_v3_00_a.proc_common_pkg.all; -- Only for simulation ( pad_power2() )
|
|
|
entity layerPS_top is
|
entity layerPS_top is
|
|
|
generic
|
generic
|
(
|
(
|
NumN : natural := 64; ------- Number of neurons of the layer
|
WBinit : boolean := false;
|
NumIn : natural := 8; ------- Number of inputs of each neuron
|
LNum : natural := 0; ------- layer number (needed for initialization)
|
NbitIn : natural := 12; ------- Bit width of the input data
|
NumN : natural := 34; ------- Number of neurons of the layer
|
NbitW : natural := 8; ------- Bit width of weights and biases
|
NumIn : natural := 27; ------- Number of inputs of each neuron
|
|
NbitIn : natural := 8; ------- Bit width of the input data
|
|
NbitW : natural := 1; ------- Bit width of weights and biases
|
NbitOut : natural := 8; ------- Bit width of the output data
|
NbitOut : natural := 8; ------- Bit width of the output data
|
lra_l : natural := 10; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn)
|
lra_l : natural := 11; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn)
|
wra_l : natural := 3; ------- Weight RAM address length. It should value log2(NumIn)
|
wra_l : natural := 5; ------- Weight RAM address length. It should value log2(NumIn)
|
bra_l : natural := 6; ------- Bias RAM address length. It should value log2(NumN)
|
bra_l : natural := 6; ------- Bias RAM address length. It should value log2(NumN)
|
LSbit : natural := 4 ------- Less significant bit of the outputs
|
LSbit : natural := 6 ------- Less significant bit of the outputs
|
);
|
);
|
|
|
port
|
port
|
(
|
(
|
-- Input ports
|
-- Input ports
|
Line 62... |
Line 67... |
|
|
end layerPS_top;
|
end layerPS_top;
|
|
|
architecture Behavioral of layerPS_top is
|
architecture Behavioral of layerPS_top is
|
|
|
--type ramd_type is array (pad_power2(NumN)-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces -- pad_power2() only for simulation
|
|
--type layer_ram is array (pad_power2(NumIn)-1 downto 0) of ramd_type;
|
|
type ramd_type is array (NumN-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces
|
type ramd_type is array (NumN-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces
|
type layer_ram is array (NumIn-1 downto 0) of ramd_type;
|
type layer_ram is array (NumIn-1 downto 0) of ramd_type;
|
type outm_type is array (NumIn-1 downto 0) of std_logic_vector(NbitW-1 downto 0);
|
type outm_type is array (NumIn-1 downto 0) of std_logic_vector(NbitW-1 downto 0);
|
|
|
signal lram : layer_ram; -- Layer RAM. One RAM per input. It stores the weights
|
-- Returns the initial weight contents for this layer's RAM.
-- When the WBinit generic is true, each weight is copied from the
-- w_init constant of the wb_init package, indexed by the layer number
-- LNum, input index i and neuron index j; otherwise an all-zeros
-- array is returned so the inferred RAM starts cleared.
-- NOTE(review): assumes w_init(LNum) is dimensioned at least
-- (NumIn x NumN) with NbitW-wide words -- confirm against wb_init.
function fw_init(LNum : natural) return layer_ram is
  variable tmp_arr : layer_ram := (others => (others => (others => '0')));
begin
  if WBinit = true then
    for i in 0 to NumIn-1 loop
      for j in 0 to NumN-1 loop
        tmp_arr(i)(j) := w_init(LNum)(i)(j);
      end loop;
    end loop;
  end if;
  return tmp_arr;
end fw_init;
|
|
|
|
|
|
|
|
|
|
-- Returns the initial bias contents for this layer.
-- When the WBinit generic is true, each bias is copied from the
-- b_init constant of the wb_init package, indexed by the layer number
-- LNum and neuron index i; otherwise an all-zeros array is returned.
-- NOTE(review): assumes b_init(LNum) holds at least NumN words of
-- width NbitW -- confirm against wb_init.
function fb_init(LNum : natural) return ramd_type is
  variable tmp_arr : ramd_type := (others => (others => '0'));
begin
  if WBinit = true then
    for i in 0 to NumN-1 loop
      tmp_arr(i) := b_init(LNum)(i);
    end loop;
  end if;
  return tmp_arr;
end fb_init;
|
|
|
|
--function fb_init(LNum : natural) return ramd_type is
|
|
--begin
|
|
-- return ramd_type(b_init(LNum));
|
|
--end fb_init;
|
|
|
|
signal lram : layer_ram := fw_init(LNum); -- Layer RAM. One RAM per input. It stores the weights
|
|
signal breg : ramd_type := fb_init(LNum); -- Bias RAM. They can be RAM because they are not accessed simultaneously
|
signal outm : outm_type; -- RAM outputs to be multiplexed into rdata
|
signal outm : outm_type; -- RAM outputs to be multiplexed into rdata
|
signal m_sel : std_logic_vector(NumIn-1 downto 0); --------- RAM select
|
signal m_sel : std_logic_vector(NumIn-1 downto 0); --------- RAM select
|
signal Wyb : std_logic_vector((NbitW*NumIn)-1 downto 0); -- Weight vectors
|
signal Wyb : std_logic_vector((NbitW*NumIn)-1 downto 0); -- Weight vectors
|
signal bias : std_logic_vector(NbitW-1 downto 0); -------- Bias
|
signal bias : std_logic_vector(NbitW-1 downto 0); -------- Bias
|
signal Nouts : std_logic_vector(NbitOut-1 downto 0); ------ Outputs from neurons
|
signal Nouts : std_logic_vector(NbitOut-1 downto 0); ------ Outputs from neurons
|
signal uaddr : unsigned(lra_l-1 downto 0); -- Unsigned address of weight and bias memories
|
signal uaddr : unsigned(lra_l-1 downto 0); -- Unsigned address of weight and bias memories
|
|
|
-- Señales de control
|
-- Señales de control
|
signal cont : integer range 0 to NumN-1; -- Neuron counter
|
signal cont : integer range 0 to NumN-1; -- Neuron counter
|
signal cntb : integer range 0 to NumN-1; -- Delayed counter for biases
|
signal cntb : integer range 0 to NumN-1; -- Delayed counter for biases
|
signal st : bit; ------- State
|
signal st : bit; ------- State
|
signal en1 : std_logic; -- First step enable
|
signal en1 : std_logic; -- First step enable
|
signal en2 : std_logic; -- Second stage enable
|
signal en2 : std_logic; -- Second stage enable
|
signal en3 : std_logic; -- Shift register enable
|
signal en3 : std_logic; -- Shift register enable
|
signal en_out : std_logic;
|
signal en_out : std_logic;
|
|
|
|
signal input_aux1 : std_logic_vector((NbitIn*NumIn)-1 downto 0);
|
|
signal input_aux2 : std_logic_vector((NbitIn*NumIn)-1 downto 0);
|
|
signal input_aux3 : std_logic_vector((NbitIn*NumIn)-1 downto 0);
|
begin
|
begin
|
|
|
layerPS_inst: entity work.layerPS
|
layerPS_inst: entity work.layerPS
|
generic map
|
generic map
|
(
|
(
|
Line 106... |
Line 144... |
reset => reset,
|
reset => reset,
|
clk => clk,
|
clk => clk,
|
en => en1,
|
en => en1,
|
en2 => en2,
|
en2 => en2,
|
en_r => en3,
|
en_r => en3,
|
inputs => inputs,
|
inputs => input_aux2,
|
Wyb => Wyb,
|
Wyb => Wyb,
|
bias => bias,
|
bias => bias,
|
|
|
-- Output ports
|
-- Output ports
|
en_out => en_out,
|
en_out => en_out,
|
Line 225... |
Line 263... |
st <= '0';
|
st <= '0';
|
en1 <= '0';
|
en1 <= '0';
|
en2 <= '0';
|
en2 <= '0';
|
run_out <= '0';
|
run_out <= '0';
|
else
|
else
|
|
input_aux1 <= inputs;
|
|
input_aux2 <= input_aux1;
|
|
--input_aux3 <=input_aux3 input_aux2;
|
|
|
cntb <= cont; -- Bias counter is delayed to assure correctness of pipeline data
|
cntb <= cont; -- Bias counter is delayed to assure correctness of pipeline data
|
case st is
|
case st is
|
when '0' =>
|
when '0' =>
|
en1 <= '0'; -- en1 is delayed 1 cycle in order to insert a register for Wyb
|
en1 <= '0'; -- en1 is delayed 1 cycle in order to insert a register for Wyb
|
case run_in is
|
case run_in is
|
Line 236... |
Line 278... |
when '0' => st <= '0';
|
when '0' => st <= '0';
|
when others => st <= '0';
|
when others => st <= '0';
|
end case;
|
end case;
|
when '1' =>
|
when '1' =>
|
en1 <= '1'; -- en1 is delayed 1 cycle in order to insert a register for Wyb
|
en1 <= '1'; -- en1 is delayed 1 cycle in order to insert a register for Wyb
|
case cont is
|
if cont = NumN-1 then
|
when (NumN-1) =>
|
|
cont <= 0;
|
cont <= 0;
|
st <= '0';
|
st <= '0';
|
when others =>
|
else
|
cont <= cont +1;
|
cont <= cont +1;
|
end case;
|
end if;
|
end case;
|
end case;
|
|
|
en2 <= en1;
|
en2 <= en1;
|
|
|
run_out <= en3; -- It lasts for 1 cycle, just after the output enable of the layer (when all outputs have just updated)
|
run_out <= en3; -- It lasts for 1 cycle, just after the output enable of the layer (when all outputs have just updated)
|