OpenCores
URL https://opencores.org/ocsvn/artificial_neural_network/artificial_neural_network/trunk

Subversion Repositories artificial_neural_network

Compare Revisions

  • This comparison shows the changes necessary to convert path / from Rev 7 to Rev 8

Rev 7 → Rev 8

/artificial_neural_network/trunk/ANN_kernel/RTL_VHDL_files/activation_function.vhd
25,6 → 25,7
entity activation_function is
generic
(
lsbit : natural := 10;
f_type : string := "linear"; -- Activation function type
Nbit : natural := 8 -- Bit width
);
41,6 → 42,22
 
architecture Structural of activation_function is
 
component af_sigmoid is
generic
(
Nbit : natural := 8
);
port
(
reset : in std_logic;
clk : in std_logic;
run_in : in std_logic; -- Start and input data validation
inputs : in std_logic_vector(Nbit-1 downto 0); -- Input data
run_out : out std_logic; -- Output data validation, run_in for the next layer
outputs : out std_logic_vector(Nbit-1 downto 0) -- Output data
);
end component;
 
begin
 
-- Linear activation function. It is a direct assignment:
53,7 → 70,7
-- Example 1: sigmoid activation function implemented as a Look-Up-Table (LUT):
Sigmoid_f:
if (f_type = "siglut") generate
siglut_inst: entity work.af_sigmoid
siglut_inst: af_sigmoid
generic map
(
Nbit => Nbit
75,7 → 92,8
siglut_inst: entity work.af_sigmoid2
generic map
(
Nbit => Nbit
Nbit => Nbit,
lsbit => lsbit
)
port map
(
/artificial_neural_network/trunk/ANN_kernel/RTL_VHDL_files/adder_tree.vhd
46,6 → 46,29
 
architecture Behavioral of adder_tree is
 
 
 
component adder_tree is
generic
(
NumIn : integer := 9; -- Number of inputs
Nbit : integer := 12 -- Bit width of the input data
);
 
port
(
-- Input ports
reset : in std_logic;
clk : in std_logic;
en : in std_logic; -- Enable
inputs : in std_logic_vector((Nbit*NumIn)-1 downto 0); -- Input data
 
-- Output ports
en_out : out std_logic; -- Output enable (output data validation)
output : out std_logic_vector(Nbit-1 downto 0) -- Output of the tree adder
);
end component;
 
constant NumIn2 : integer := NumIn/2; -- Number of inputs of the next adder tree layer
 
signal next_en : std_logic := '0'; -- Next adder tree layer enable
107,7 → 130,7
recursion:
if (NumIn > 2) generate
 
sub_adder_tree: entity work.adder_tree
sub_adder_tree: adder_tree
generic map
(
NumIn => (NumIn2)+(NumIn mod 2),
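
The recursion above always hands the sub-tree the ceiling of half the inputs, since NumIn2 + (NumIn mod 2) = ceil(NumIn/2). A quick consequence, implied by the code rather than stated in it, is the depth of the tree:

$\mathrm{NumIn}' = \lceil \mathrm{NumIn}/2 \rceil, \qquad \text{stages} = \lceil \log_2 \mathrm{NumIn} \rceil$

e.g. the default NumIn = 9 collapses as 9 -> 5 -> 3 -> 2 -> 1, i.e. 4 stages.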
/artificial_neural_network/trunk/ANN_kernel/RTL_VHDL_files/af_sigmoid.vhd
1,99 → 1,99
----------------------------------------------------------------------------------
-- Company: CEI
-- Engineer: Enrique Herrero
--
-- Create Date:
-- Design Name: Configurable ANN
-- Module Name: af_sigmoid - Behavioral
-- Project Name:
-- Target Devices:
-- Tool versions:
-- Description: Sigmoid activation function implemented as a Look-Up-Table (LUT).
--
-- Dependencies:
--
-- Revision:
-- Revision 0.01 - File Created
-- Revision 1 - David Aledo
-- Additional Comments:
--
----------------------------------------------------------------------------------
library IEEE;
use IEEE.STD_LOGIC_1164.ALL;
use ieee.numeric_std.ALL;
use ieee.math_real.all;
 
 
entity af_sigmoid is
generic
(
Nbit : natural := 8
);
port
(
reset : in std_logic;
clk : in std_logic;
run_in : in std_logic; -- Start and input data validation
inputs : in std_logic_vector(Nbit-1 downto 0); -- Input data
run_out : out std_logic; -- Output data validation, run_in for the next layer
outputs : out std_logic_vector(Nbit-1 downto 0) -- Output data
);
end af_sigmoid;
 
 
architecture Behavioral of af_sigmoid is
 
-- Definition of internal modules, constants, signals, etc...
 
-- Sigmoid parameters:
constant f0 : real := 2.0; -- Slope at the origin
constant fr : real := 2.0; -- fr = fmax - fmin
 
signal dataIn: integer range (2**Nbit-1) downto 0; -- To convert std_logic_vector input to integer index for the LUT
type table_t is array(0 to (2**Nbit)-1) of std_logic_vector(Nbit-1 downto 0); -- LUT type
 
-- Function Sigmoidal: generates the Look-Up-Table for the sigmoid activation function:
-- margin: maximum value of x.
function Sigmoidal(margin:real;Nbit:natural) return table_t is
variable scale,x,y,w,t: real;
variable u: integer;
variable fbits: std_logic_vector(Nbit-1 downto 0);
variable table: table_t;
begin
scale := (2.0*margin)/(2.0**Nbit); -- Calculates the gap between two points
x := -margin;
for idx in -(2**(Nbit-1)) to (2**(Nbit-1))-1 loop
y := (fr/(1.0+exp(((-4.0*f0)/fr)*x)))-(fr/2.0);
w := y*(2.0**(Nbit-1)); -- Shifts bits to the left
t := round(w);
u := integer(t);
fbits := std_logic_vector(to_signed(u,Nbit));
table(to_integer(to_unsigned(idx+(2**Nbit),Nbit))):= fbits;
x := x+scale;
end loop;
return table;
end Sigmoidal;
signal Table: table_t := Sigmoidal(1.0,Nbit); -- Generation of the LUT (at synthesis time)
 
begin
 
-- Description of the activation function
dataIn <= to_integer(signed(inputs));
 
Activation: process(clk,reset)
begin
if clk'event and clk = '1' then
if reset = '1' then
run_out <= '0';
outputs <= (others => '0');
else
if run_in = '1' then
run_out <='1';
outputs <=Table(dataIn); -- Assigns output value from the LUT
else
run_out <='0';
end if;
end if;
end if;
end process;
end Behavioral;
----------------------------------------------------------------------------------
-- Company: CEI
-- Engineer: Enrique Herrero
--
-- Create Date:
-- Design Name: Configurable ANN
-- Module Name: af_sigmoid - Behavioral
-- Project Name:
-- Target Devices:
-- Tool versions:
-- Description: Sigmoid activation function implemented as a Look-Up-Table (LUT).
--
-- Dependencies:
--
-- Revision:
-- Revision 0.01 - File Created
-- Revision 1 - David Aledo
-- Additional Comments:
--
----------------------------------------------------------------------------------
library IEEE;
use IEEE.STD_LOGIC_1164.ALL;
use ieee.numeric_std.ALL;
use ieee.math_real.all;
 
 
entity af_sigmoid is
generic
(
Nbit : natural := 8
);
port
(
reset : in std_logic;
clk : in std_logic;
run_in : in std_logic; -- Start and input data validation
inputs : in std_logic_vector(Nbit-1 downto 0); -- Input data
run_out : out std_logic; -- Output data validation, run_in for the next layer
outputs : out std_logic_vector(Nbit-1 downto 0) -- Output data
);
end af_sigmoid;
 
 
architecture Behavioral of af_sigmoid is
 
-- Definition of internal modules, constants, signals, etc...
 
-- Sigmoid parameters:
constant f0 : real := 1.0; -- Slope at the origin
constant fr : real := 2.0; -- fr = fmax - fmin
 
signal dataIn: integer range (2**Nbit-1) downto 0; -- To convert std_logic_vector input to integer index for the LUT
type table_t is array(0 to (2**Nbit)-1) of std_logic_vector(Nbit-1 downto 0); -- LUT type
 
-- Function Sigmoidal: generates the Look-Up-Table for the sigmoid activation function:
-- margin: maximum value of x.
function Sigmoidal(margin:real;Nbit:natural) return table_t is
variable scale,x,y,w,t: real;
variable u: integer;
variable fbits: std_logic_vector(Nbit-1 downto 0);
variable table: table_t;
begin
scale := (2.0*margin)/(2.0**Nbit); -- Calculates the gap between two points
x := -margin;
for idx in -(2**(Nbit-1)) to (2**(Nbit-1))-1 loop
y := ( fr / (1.0+exp(((-4.0*f0)/fr)*x)) ) - (fr/2.0);
w := y*(2.0**(Nbit-1)); -- Shifts bits to the left
t := round(w);
u := integer(t);
fbits := std_logic_vector(to_signed(u,Nbit));
table(to_integer(to_unsigned(idx+(2**Nbit),Nbit))):= fbits;
x := x+scale;
end loop;
return table;
end Sigmoidal;
signal Table: table_t := Sigmoidal(1.0,Nbit); -- Generation of the LUT (at synthesis time)
 
begin
 
-- Description of the activation function
dataIn <= to_integer(unsigned(inputs));
 
Activation: process(clk,reset)
begin
if clk'event and clk = '1' then
if reset = '1' then
run_out <= '0';
outputs <= (others => '0');
else
if run_in = '1' then
run_out <='1';
outputs <=Table(dataIn); -- Assigns output value from the LUT
else
run_out <='0';
end if;
end if;
end if;
end process;
end Behavioral;
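
Both copies above tabulate the same curve; the functional deltas between Rev 7 and Rev 8 are f0 (2.0 to 1.0) and the LUT index conversion (signed to unsigned). For reference, the function that Sigmoidal tabulates, reconstructed from the code:

$y(x) = \frac{f_r}{1 + e^{-4 f_0 x / f_r}} - \frac{f_r}{2}, \qquad \mathrm{table}(k) = \mathrm{round}\big(y(x_k)\,2^{\mathrm{Nbit}-1}\big), \quad x_k = -\mathrm{margin} + k\,\frac{2\,\mathrm{margin}}{2^{\mathrm{Nbit}}}$

The $-4 f_0 / f_r$ exponent makes $y'(0) = f_0$, which is why f0 is commented as the slope at the origin.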
/artificial_neural_network/trunk/ANN_kernel/RTL_VHDL_files/af_sigmoid2.vhd
28,7 → 28,8
entity af_sigmoid2 is
generic
(
Nbit : natural := 8
Nbit : natural := 8;
lsbit : natural := 10
);
port
(
47,7 → 48,7
-- Definition of internal modules, constants, signals, etc...
 
-- Sigmoid parameters:
constant f0 : real := 0.5; -- Slope at the origin
constant f0 : real := 1.0; -- Slope at the origin
constant fr : real := 2.0; -- fr = fmax - fmin
 
signal dataIn: integer range (2**Nbit-1) downto 0; -- To convert std_logic_vector input to integer index for the LUT
54,8 → 55,8
type table_t is array(0 to (2**Nbit)-1) of std_logic_vector(Nbit-1 downto 0); -- LUT type
 
-- Function Sigmoidal: generates the Look-Up-Table for the sigmoid activation function:
-- margin: maximum value of x.
function Sigmoidal(margin:real;Nbit:natural) return table_t is
-- margin: maximum value of input
function Sigmoidal(margin:real;Nbit:natural;lsbit:natural) return table_t is
variable scale,x,y,w,t: real;
variable u: integer;
variable fbits: std_logic_vector(Nbit-1 downto 0);
62,10 → 63,10
variable table: table_t;
begin
scale := (2.0*margin)/(2.0**Nbit); -- Calculates the gap between two points
x := -margin;
x := -margin;
for idx in -(2**(Nbit-1)) to (2**(Nbit-1))-1 loop
y := ( fr / (1.0+exp(((-4.0*f0)/fr)*x)) ) - (fr/2.0);
w := y*(2.0**(Nbit-1)); -- Shifts bits to the left
w := y*(2.0**(lsbit)); -- Shifts bits to the left
t := round(w);
u := integer(t);
fbits := std_logic_vector(to_signed(u,Nbit));
74,12 → 75,12
end loop;
return table;
end Sigmoidal;
signal Table: table_t := Sigmoidal(1.0,Nbit); -- Generation of the LUT (at synthesis time)
signal Table: table_t := Sigmoidal(2.0**(Nbit-lsbit-1),Nbit,lsbit); -- Generation of the LUT (at synthesis time)
 
begin
 
-- Description of the activation function
dataIn <= to_integer(signed(inputs));
dataIn <= to_integer(unsigned(inputs));
 
Activacion: process(clk,reset)
begin
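
The new margin expression ties the LUT range to the fixed-point format: with lsbit fractional bits in an Nbit signed word, the largest representable input magnitude is $2^{\mathrm{Nbit}-\mathrm{lsbit}-1}$. Using the testbench values from support_pkg.vhd further below (Nbit = 12, lsbit = 8):

$\mathrm{margin} = 2^{12-8-1} = 8, \qquad \mathrm{table\ entries} = \mathrm{round}\big(y(x_k)\cdot 2^{8}\big)$

so outputs are now scaled by $2^{\mathrm{lsbit}}$ rather than by $2^{\mathrm{Nbit}-1}$ as in af_sigmoid.vhd.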
/artificial_neural_network/trunk/ANN_kernel/RTL_VHDL_files/ann.vhd
28,6 → 28,7
entity ann is
generic
(
WBinit : boolean := false;
Nlayer : integer := 2; ---- Number of layers
NbitW : natural := 16; ---- Bit width of weights and biases
NumIn : natural := 64; ---- Number of inputs to the network
117,6 → 118,8
first_layerSP_top_inst: entity work.layerSP_top
generic map
(
WBinit => WBinit ,
LNum => 0 ,
NumN => NumN(0), -- Number of neurons in the first layer
NumIn => NumIn, ---- Number of inputs of the first layer
NbitIn => NbitIn, --- Bit width of the input data
178,7 → 181,8
generic map
(
f_type => ftype_v(i-1), -- Activation function type of the previous layer (i-1)
Nbit => NbitO(i-1) --- Bit width of the outputs of the previous layer (i-1)
Nbit => NbitO(i-1), -- Bit width of the outputs of the previous layer (i-1)
lsbit => LSbit(i-1) -- least significant bit of activation function
)
port map
(
202,7 → 206,8
generic map
(
f_type => ftype_v(i-1), -- Activation function type of the previous layer (i-1)
Nbit => NbitO(i-1) --- Bit width of the outputs of the previous layer (i-1)
Nbit => NbitO(i-1), -- Bit width of the outputs of the previous layer (i-1)
lsbit => LSbit(i-1) -- least significant bit of activation function
)
port map
(
226,7 → 231,8
generic map
(
f_type => ftype_v(i-1), -- Activation function type of the previous layer (i-1)
Nbit => NbitO(i-1) --- Bit width of the outputs of the previous layer (i-1)
Nbit => NbitO(i-1), -- Bit width of the outputs of the previous layer (i-1)
lsbit => LSbit(i-1) -- least significant bit of activation function
)
port map
(
245,7 → 251,8
generic map
(
f_type => ftype_v(i-1), -- Activation function type of the previous layer (i-1)
Nbit => NbitO(i-1) --- Bit width of the outputs of the previous layer (i-1)
Nbit => NbitO(i-1) , -- Bit width of the outputs of the previous layer (i-1)
lsbit => LSbit(i-1) -- least significant bit of activation function
)
port map
(
270,7 → 277,8
generic map
(
f_type => ftype_v(i-1),
Nbit => NbitO(i-1)
Nbit => NbitO(i-1),
lsbit => LSbit(i-1) -- least significant bit of activation function
)
port map
(
309,6 → 317,8
layerSP_top_inst: entity work.layerSP_top
generic map
(
WBinit => WBinit ,
LNum => i ,
NumN => NumN(i), --- Number of neurons in layer (i)
NumIn => NumN(i-1), -- Number of inputs, is the number of neurons in previous layer (i-1)
NbitIn => NbitO(i-1), -- Bit width of the input data, is the bit width of output data of layer (i-1)
344,7 → 354,9
if ltype_v(i) = "PS" generate
layerPS_top_inst: entity work.layerPS_top
generic map
(
(
WBinit => WBinit ,
LNum => i ,
NumN => NumN(i), --- Number of neurons in layer (i)
NumIn => NumN(i-1), -- Number of inputs, is the number of neurons in previous layer (i-1)
NbitIn => NbitO(i-1), -- Bit width of the input data, is the bit width of output data of layer (i-1)
380,9 → 392,9
if ltype_v(i) = "PP" generate
-- TODO: instantiate a full parallel layer. In the current version this layer type has not been developed.
-- synthesis translate_off
assert l_type(i) /= "PP"
report "Current version does not accept parallel-input parallel-output (PP) layer type."
severity failure;
--assert l_type(i) /= "PP"
-- report "Current version does not accept parallel-input parallel-output (PP) layer type."
-- severity failure;
-- synthesis translate_on
-- TODO: delete the above lines when instantiating the parallel-input parallel-output layer.
end generate;
414,7 → 426,8
generic map
(
f_type => ftype_v(Nlayer-1), -- Activation function type of the last layer (Nlayer-1)
Nbit => NbitO(Nlayer-1) --- Bit width of the outputs of the last layer (Nlayer-1)
Nbit => NbitO(Nlayer-1), --- Bit width of the outputs of the last layer (Nlayer-1)
lsbit => LSbit(Nlayer-1) -- least significant bit of activation function
)
port map
(
435,7 → 448,8
generic map
(
f_type => ftype_v(Nlayer-1), -- Activation function type of the last layer (Nlayer-1)
Nbit => NbitO(Nlayer-1) --- Bit width of the outputs of the last layer (Nlayer-1)
Nbit => NbitO(Nlayer-1), -- Bit width of the outputs of the last layer (Nlayer-1)
lsbit => LSbit(Nlayer-1) -- least significant bit of activation function
)
port map
(
/artificial_neural_network/trunk/ANN_kernel/RTL_VHDL_files/layerPS_top.vhd
22,6 → 22,9
use IEEE.STD_LOGIC_1164.ALL;
use ieee.numeric_std.all;
 
library work;
use work.wb_init.all; -- initialization package, comment out when not used
-- Deprecated XPS library:
--library proc_common_v3_00_a;
--use proc_common_v3_00_a.proc_common_pkg.all; -- Only for simulation ( pad_power2() )
30,15 → 33,17
 
generic
(
NumN : natural := 64; ------- Number of neurons of the layer
NumIn : natural := 8; ------- Number of inputs of each neuron
NbitIn : natural := 12; ------- Bit width of the input data
NbitW : natural := 8; ------- Bit width of weights and biases
WBinit : boolean := false;
LNum : natural := 0; ------- layer number (needed for initialization)
NumN : natural := 34; ------- Number of neurons of the layer
NumIn : natural := 27; ------- Number of inputs of each neuron
NbitIn : natural := 8; ------- Bit width of the input data
NbitW : natural := 1; ------- Bit width of weights and biases
NbitOut : natural := 8; ------- Bit width of the output data
lra_l : natural := 10; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn)
wra_l : natural := 3; ------- Weight RAM address length. It should value log2(NumIn)
lra_l : natural := 11; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn)
wra_l : natural := 5; ------- Weight RAM address length. It should value log2(NumIn)
bra_l : natural := 6; ------- Bias RAM address length. It should value log2(NumN)
LSbit : natural := 4 ------- Least significant bit of the outputs
LSbit : natural := 6 ------- Least significant bit of the outputs
);
 
port
64,14 → 69,44
 
architecture Behavioral of layerPS_top is
 
--type ramd_type is array (pad_power2(NumN)-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces -- pad_power2() only for simulation
--type layer_ram is array (pad_power2(NumIn)-1 downto 0) of ramd_type;
type ramd_type is array (NumN-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces
type layer_ram is array (NumIn-1 downto 0) of ramd_type;
type outm_type is array (NumIn-1 downto 0) of std_logic_vector(NbitW-1 downto 0);
 
signal lram : layer_ram; -- Layer RAM. One RAM per input. It stores the weights
signal breg : ramd_type; -- Bias RAM. They can be RAM because they are not accessed simultaneously
function fw_init(LNum : natural) return layer_ram is
variable tmp_arr : layer_ram := (others =>(others => (others => '0')));
begin
if WBinit = true then
for i in 0 to NumIn-1 loop
for j in 0 to NumN-1 loop
tmp_arr(i)(j) := w_init(LNum)(i)(j);
end loop;
end loop;
end if;
return tmp_arr ;
end fw_init;
 
 
 
 
function fb_init(LNum : natural) return ramd_type is
variable tmp_arr : ramd_type := (others => (others => '0')) ;
begin
if WBinit = true then
for i in 0 to NumN-1 loop
tmp_arr(i) := b_init(LNum)(i);
end loop;
end if;
return tmp_arr;
end fb_init;
 
--function fb_init(LNum : natural) return ramd_type is
--begin
-- return ramd_type(b_init(LNum));
--end fb_init;
 
signal lram : layer_ram := fw_init(LNum); -- Layer RAM. One RAM per input. It stores the weights
signal breg : ramd_type := fb_init(LNum); -- Bias RAM. They can be RAM because they are not accessed simultaneously
signal outm : outm_type; -- RAM outputs to be multiplexed into rdata
signal m_sel : std_logic_vector(NumIn-1 downto 0); --------- RAM select
signal Wyb : std_logic_vector((NbitW*NumIn)-1 downto 0); -- Weight vectors
79,7 → 114,7
signal Nouts : std_logic_vector(NbitOut-1 downto 0); ------ Outputs from neurons
signal uaddr : unsigned(lra_l-1 downto 0); -- Unsigned address of weight and bias memories
 
 
-- Control signals
signal cont : integer range 0 to NumN-1; -- Neuron counter
signal cntb : integer range 0 to NumN-1; -- Delayed counter for biases
signal st : bit; ------- State
88,6 → 123,9
signal en3 : std_logic; -- Shift register enable
signal en_out : std_logic;
 
signal input_aux1 : std_logic_vector((NbitIn*NumIn)-1 downto 0);
signal input_aux2 : std_logic_vector((NbitIn*NumIn)-1 downto 0);
signal input_aux3 : std_logic_vector((NbitIn*NumIn)-1 downto 0);
begin
 
layerPS_inst: entity work.layerPS
108,7 → 146,7
en => en1,
en2 => en2,
en_r => en3,
inputs => inputs,
inputs => input_aux2,
Wyb => Wyb,
bias => bias,
 
227,6 → 265,10
en2 <= '0';
run_out <= '0';
else
input_aux1 <= inputs;
input_aux2 <= input_aux1;
--input_aux3 <= input_aux2;
 
cntb <= cont; -- Bias counter is delayed to assure correctness of pipeline data
case st is
when '0' =>
238,13 → 280,12
end case;
when '1' =>
en1 <= '1'; -- en1 is delayed 1 cycle in order to insert a register for Wyb
case cont is
when (NumN-1) =>
cont <= 0;
st <= '0';
when others =>
cont <= cont +1;
end case;
if cont = NumN-1 then
cont <= 0;
st <= '0';
else
cont <= cont +1;
end if;
end case;
 
en2 <= en1;
/artificial_neural_network/trunk/ANN_kernel/RTL_VHDL_files/layerSP_top.vhd
22,6 → 22,9
use IEEE.STD_LOGIC_1164.ALL;
use ieee.numeric_std.all;
 
library work;
use work.wb_init.all; -- initialization package, comment out when not used
 
-- Deprecated XPS library:
--library proc_common_v3_00_a;
--use proc_common_v3_00_a.proc_common_pkg.all; -- Only for simulation ( pad_power2() )
30,15 → 33,17
 
generic
(
NumN : natural := 8; ------- Number of neurons of the layer
NumIn : natural := 64; ------- Number of inputs of each neuron
WBinit : boolean := false;
LNum : natural := 0; ------- layer number (needed for initialization)
NumN : natural := 34; ------- Number of neurons of the layer
NumIn : natural := 27; ------- Number of inputs of each neuron
NbitIn : natural := 8; ------- Bit width of the input data
NbitW : natural := 8; ------- Bit width of weights and biases
NbitOut : natural := 12; ------- Bit width of the output data
lra_l : natural := 10; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn)
wra_l : natural := 6; ------- Weight RAM address length. It should value log2(NumIn)
bra_l : natural := 3; ------- Bias RAM address length. It should value log2(NumN)
LSbit : natural := 4 ------- Least significant bit of the outputs
NbitW : natural := 32; ------- Bit width of weights and biases
NbitOut : natural := 8; ------- Bit width of the output data
lra_l : natural := 11; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn)
wra_l : natural := 5; ------- Weight RAM address length. It should value log2(NumIn)
bra_l : natural := 6; ------- Bias RAM address length. It should value log2(NumN)
LSbit : natural := 6 ------- Least significant bit of the outputs
);
 
port
64,14 → 69,39
 
architecture Behavioral of layerSP_top is
 
--type ramd_type is array (pad_power2(NumIn)-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces
--type layer_ram is array (pad_power2(NumN)-1 downto 0) of ramd_type;
type ramd_type is array (NumIn-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces
type layer_ram is array (NumN-1 downto 0) of ramd_type;
type outm_type is array (NumN-1 downto 0) of std_logic_vector(NbitW-1 downto 0);
 
signal lram : layer_ram; -- Layer RAM. One RAM per neuron. It stores the weights
signal breg : outm_type; -- Bias registers. They can not be RAM because they are accessed simultaneously
function fw_init(LNum : natural) return layer_ram is
variable tmp_arr : layer_ram := (others => (others => (others => '0'))) ;
begin
if WBinit = true then
for i in 0 to NumIn-1 loop
for j in 0 to NumN-1 loop
tmp_arr(j)(i) := w_init(LNum)(i)(j);
end loop;
end loop;
end if;
return tmp_arr ;
end fw_init;
 
function fb_init(LNum : natural) return outm_type is
variable tmp_arr : outm_type := (others => (others => '0')) ;
begin
if WBinit = true then
for i in 0 to NumN-1 loop
tmp_arr(i) := b_init(LNum)(i);
end loop;
end if;
return tmp_arr;
end fb_init;
 
 
 
signal lram : layer_ram := fw_init(LNum); -- Layer RAM. One RAM per neuron. It stores the weights
signal breg : outm_type := fb_init(LNum); -- Bias registers. They can not be RAM because they are accessed simultaneously
signal outm : outm_type; -- RAM outputs to be multiplexed into rdata
signal m_sel : std_logic_vector(NumN-1 downto 0); -------- RAM select
signal Wyb : std_logic_vector((NbitW*NumN)-1 downto 0); --- Weight vectors
166,7 → 196,14
end if;
end if;
end process;
outm(i) <= lram(i)(to_integer(uaddr(wra_l-1 downto 0))); -- Read all RAM
outm(i) <= lram(i)(to_integer(uaddr(wra_l-1 downto 0))) when (uaddr(wra_l-1 downto 0) <= NumIn-1) else
(others => '0') ; -- Read all RAM
-- Example: with 27 inputs and 34 neurons in the first layer, addressing the
-- first layer's weights for the second neuron makes the layer that accepts a
-- 6-bit input address (layer 2) see "1 00100" (34) and interpret it as an
-- input address (which only goes up to 33), hence the bound-check failure.
-- Fix: the assignment is now conditional, so we never try to read a weight
-- for an input index higher than this layer's number of inputs.
end generate;
 
-- Synchronous read including breg:
173,6 → 210,8
process (clk)
begin
if (clk'event and clk = '1') then
--report "addr: " & integer'image(wra_l-1);
--report "addr: " & integer'image(to_integer(uaddr(wra_l-1 downto 0)) );
if (m_en = '1') then
if (b_sel = '1') then
rdata <= breg(to_integer(uaddr(bra_l-1 downto 0))); -- Bias registers selected
246,6 → 285,9
else
cont <= cont +1;
end if;
--elsif (cont = NumIn-1) then -- For layers with more than
-- cont <= 0; -- one neuron, uncommenting this
-- aux2_en3 <= '1'; -- block solved a problem with cont resetting
end if;
en2 <= en1;
if (cont = 0 and run_in = '1') then
/artificial_neural_network/trunk/ANN_kernel/RTL_VHDL_files/layers_pkg.vhd
1,301 → 1,304
----------------------------------------------------------------------------------
-- Company: CEI - UPM
-- Engineer: David Aledo
--
-- Create Date: 01.10.2015
-- Design Name: Configurable ANN
-- Package Name: layers_pkg
-- Project Name:
-- Target Devices:
-- Tool Versions:
-- Description: define array types for generics, functions to give them values from
-- string generics, and other help functions
-- Dependencies:
--
-- Revision:
-- Revision 0.01 - File Created
-- Additional Comments:
--
----------------------------------------------------------------------------------
 
library IEEE;
use IEEE.STD_LOGIC_1164.all;
 
--library proc_common_v3_00_a; -- Deprecated library from XPS tool
--use proc_common_v3_00_a.proc_common_pkg.all;
 
package layers_pkg is
 
-- Array types for generics:
type int_vector is array (natural range <>) of integer; -- Generic integer vector
type ltype_vector is array (integer range <>) of string(1 to 2); -- Layer type vector
type ftype_vector is array (integer range <>) of string(1 to 6); -- Activation function type vector
-- Note: these strings cannot be unconstrained
 
-- Functions to assign values to vector types from string generics:
-- Arguments:
-- str_v : string to be converted
-- n : number of elements of the vector
-- Return: assigned vector
function assign_ints(str_v : string; n : integer) return int_vector;
function assign_ltype(str_v : string; n : integer) return ltype_vector;
function assign_ftype(str_v : string; n : integer) return ftype_vector;
 
-- Other functions:
 
-- Argument: c : character to be checked
-- Return: TRUE if c is 0, 1, 2, 3, 4, 5, 6, 7, 8 or 9
function is_digit(c : character) return boolean;
 
-- Base two logarithm for int_vector:
-- Arguments:
-- v : integer vector
-- n : number of elements of the vector
-- Return : integer vector of the base two logarithms of each element of v
function log2(v : int_vector; n : integer) return int_vector;
 
-- Calculate the total weight and bias memory address length:
-- Arguments:
-- NumIn : number of inputs of the network
-- NumN : number of neurons of each layer
-- n : number of layers (number of elements of NumN)
-- Return: total weight and bias memory address length (integer)
function calculate_addr_l(NumIn : integer; NumN : int_vector; n : integer) return integer;
 
-- Assign the weight and bias memory address length of each layer:
-- Arguments:
-- NumIn : number of inputs of the network
-- NumN : number of neurons of each layer
-- n : number of layers (number of elements of NumN and the return integer vector)
-- Return: weight and bias memory address length of each layer (integer vector)
function assign_addrl(NumIn : integer; NumN : int_vector; n : integer) return int_vector;
 
-- Calculate the maximum of the multiplications of two vectors element by element
-- Arguments:
-- v1 : input vector 1
-- v2 : input vector 2
-- Return: maximum of the multiplications of two vectors element by element
function calculate_max_mul(v1 : int_vector; v2 : int_vector) return integer;
 
-- Returns the max value of the input integer vector:
function calculate_max(v : int_vector) return integer;
 
-- Adding needed functions from the deprecated library proc_common_v3_00_a:
function max2 (num1, num2 : integer) return integer;
function log2(x : natural) return integer;
 
end layers_pkg;
 
package body layers_pkg is
 
function max2 (num1, num2 : integer) return integer is
begin
if num1 >= num2 then
return num1;
else
return num2;
end if;
end function max2;
 
-- Function log2 -- returns number of bits needed to encode x choices
-- x = 0 returns 0
-- x = 1 returns 0
-- x = 2 returns 1
-- x = 4 returns 2, etc.
function log2(x : natural) return integer is
variable i : integer := 0;
variable val: integer := 1;
begin
if x = 0 then
return 0;
else
for j in 0 to 29 loop -- for loop for XST
if val >= x then null;
else
i := i+1;
val := val*2;
end if;
end loop;
-- Fix per CR520627 XST was ignoring this anyway and printing a
-- Warning in SRP file. This will get rid of the warning and not
-- impact simulation.
-- synthesis translate_off
assert val >= x
report "Function log2 received argument larger" &
" than its capability of 2^30. "
severity failure;
-- synthesis translate_on
return i;
end if;
end function log2;
 
 
function is_digit(c : character) return boolean is
begin
case c is
when '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' => return true;
when others => return false;
end case;
end is_digit;
 
-- Assign values to an integer vector from a string:
-- Arguments:
-- str_v : string to be converted
-- n : number of elements of the vector
-- Return: assigned integer vector
function assign_ints(str_v : string; n : integer) return int_vector is
variable i : integer := n-1; ---- element counter
variable d_power : integer := 1; -- decimal power
variable ret : int_vector(n-1 downto 0) := (others => 0); -- return value
begin
for c in str_v'length downto 1 loop -- read every character in str_v
if str_v(c) = ' ' then -- a space separates a new element
assert i > 0
report "Error in assign_ints: number of elements in string is greater than n."
severity error;
i := i -1; -- decrease element counter to start calculate a new element
d_power := 1; -- reset the decimal power to 1
else
assert is_digit(str_v(c)) -- assert the new character is a digit
report "Error in assign_ints: character " & str_v(c) & " is not a digit."
severity error;
-- add the value of the new character to the element calculation ( + ("<new_digit>" - "0") * d_power):
ret(i) := ret(i) + (character'pos(str_v(c))-character'pos('0'))*d_power;
d_power := d_power*10; -- increase the decimal power for the next digit
end if;
end loop;
assert i = 0
report "Error in assign_ints: number of elements in string is less than n."
severity error;
return ret;
end assign_ints;
 
-- Assign values to an activation function type vector from a string:
-- Arguments:
-- str_v : string to be converted
-- n : number of elements of the vector
-- Return: assigned activation function type vector
function assign_ftype(str_v : string; n : integer) return ftype_vector is
variable i : integer := 0; -- element counter
variable l : integer := 1; -- element length counter
variable ret : ftype_vector(n-1 downto 0) := (others => "linear"); -- return value
begin
for c in 1 to str_v'length loop -- read every character in str_v
if str_v(c) = ' ' then -- a space separates a new element
i := i +1; -- increase element counter to start calculate a new element
l := 1; -- reset element length counter
else
ret(i)(l) := str_v(c);
l := l +1; -- increase element length counter
end if;
end loop;
assert i = n-1
report "Error in assign_ftype: number of elements in string is less than n."
severity error;
return ret;
end assign_ftype;
 
-- Assign values to a layer type vector from a string:
-- Arguments:
-- str_v : string to be converted
-- n : number of elements of the vector
-- Return: assigned layer type vector
function assign_ltype(str_v : string; n : integer) return ltype_vector is
variable i : integer := 0; -- element counter
variable l : integer := 1; -- element length counter
variable ret : ltype_vector(n-1 downto 0) := (others => "SP"); -- return value
begin
for c in 1 to str_v'length loop
if str_v(c) = ' ' then -- a space separates a new element
i := i +1; -- increase element counter to start calculate a new element
l := 1; -- reset element length counter
else
assert str_v(c) = 'P' or str_v(c) = 'S'
report "Error in assign_ltype: character " & str_v(c) & " is not 'P' (parallel) or 'S' (serial)."
severity error;
ret(i)(l) := str_v(c);
l := l +1; -- increase element length counter
end if;
end loop;
assert i = n-1
report "Error in assign_ltype: number of elements do not coincide with number of introduced elements."
severity error;
return ret;
end assign_ltype;
 
-- Calculate the total weight and bias memory address length:
-- Arguments:
-- NumIn : number of inputs of the network
-- NumN : number of neurons of each layer
-- n : number of layers (number of elements of NumN)
-- Return: total weight and bias memory address length (integer)
function calculate_addr_l(NumIn : integer; NumN : int_vector; n : integer) return integer is -- matrix + b_sel
variable addr_l : integer := log2(NumIn)+log2(NumN(0)); -- return value. Initialized with the weight memory length of the first layer
begin
-- Calculate the maximum of the weight memory length:
for i in 1 to n-1 loop
addr_l := max2( addr_l, log2(NumN(i-1))+log2(NumN(i)) );
end loop;
addr_l := addr_l +1; -- add bias select bit
return addr_l;
end calculate_addr_l;
 
-- Base two logarithm for int_vector:
-- Arguments:
-- v : integer vector
-- n : number of elements of the vector
-- Return : integer vector of the base two logarithms of each element of v
function log2(v : int_vector; n : integer) return int_vector is
variable ret : int_vector(n-1 downto 0); -- return value
begin
-- for each element of v, calculate its base two logarithm:
for i in 0 to n-1 loop
ret(i) := log2(v(i));
end loop;
return ret;
end log2;
 
-- Assign the weight and bias memory address length of each layer:
-- Arguments:
-- NumIn : number of inputs of the network
-- NumN : number of neurons of each layer
-- n : number of layers (number of elements of NumN and the return integer vector)
-- Return: weight and bias memory address length of each layer (integer vector)
function assign_addrl(NumIn : integer; NumN : int_vector; n : integer) return int_vector is
variable ret : int_vector(n-1 downto 0); -- return value
begin
ret(0) := log2(NumIn)+log2(NumN(0)); -- Weight memory length of the first layer
for i in 1 to n-1 loop
ret(i) := log2(NumN(i-1))+log2(NumN(i));
end loop;
return ret;
end assign_addrl;
 
-- Returns the max value of the input integer vector:
function calculate_max(v : int_vector) return integer is
variable ac_max : integer := 0; -- return value
begin
for i in 0 to v'length-1 loop
ac_max := max2(ac_max,v(i));
end loop;
return ac_max;
end calculate_max;
 
-- Calculate the maximum of the multiplications of two vectors element by element
-- Arguments:
-- v1 : input vector 1
-- v2 : input vector 2
-- Return: maximum of the multiplications of two vectors element by element
function calculate_max_mul(v1 : int_vector; v2 : int_vector) return integer is
variable ac_max : integer := 0;
begin
assert v1'length = v2'length
report "Error in calculate_max_mul: vector's length do not coincide."
severity error;
for i in 0 to v1'length-1 loop
ac_max := max2(ac_max,v1(i)*v2(i));
end loop;
return ac_max;
end calculate_max_mul;
 
end layers_pkg;
----------------------------------------------------------------------------------
-- Company: CEI - UPM
-- Engineer: David Aledo
--
-- Create Date: 01.10.2015
-- Design Name: Configurable ANN
-- Package Name: layers_pkg
-- Project Name:
-- Target Devices:
-- Tool Versions:
-- Description: define array types for generics, functions to give them values from
-- string generics, and other help functions
-- Dependencies:
--
-- Revision:
-- Revision 0.01 - File Created
-- Additional Comments:
--
----------------------------------------------------------------------------------
 
library IEEE;
use IEEE.STD_LOGIC_1164.all;
use IEEE.numeric_std.all;
 
--library proc_common_v3_00_a; -- Deprecated library from XPS tool
--use proc_common_v3_00_a.proc_common_pkg.all;
 
package layers_pkg is
 
-- Array types for generics:
type int_vector is array (natural range <>) of integer; -- Generic integer vector
type ltype_vector is array (integer range <>) of string(1 to 2); -- Layer type vector
type ftype_vector is array (integer range <>) of string(1 to 6); -- Activation function type vector
-- Note: these strings cannot be unconstrained
 
-- Functions to assign values to vector types from string generics:
-- Arguments:
-- str_v : string to be converted
-- n : number of elements of the vector
-- Return: assigned vector
function assign_ints(str_v : string; n : integer) return int_vector;
function assign_ltype(str_v : string; n : integer) return ltype_vector;
function assign_ftype(str_v : string; n : integer) return ftype_vector;
 
-- Other functions:
 
-- Argument: c : character to be checked
-- Return: TRUE if c is 0, 1, 2, 3, 4, 5, 6, 7, 8 or 9
function is_digit(c : character) return boolean;
 
 
-- Base two logarithm for int_vector:
-- Arguments:
-- v : integer vector
-- n : number of elements of the vector
-- Return : integer vector of the base two logarithms of each element of v
function log2(v : int_vector; n : integer) return int_vector;
 
-- Calculate the total weight and bias memory address length:
-- Arguments:
-- NumIn : number of inputs of the network
-- NumN : number of neurons of each layer
-- n : number of layers (number of elements of NumN)
-- Return: total weight and bias memory address length (integer)
function calculate_addr_l(NumIn : integer; NumN : int_vector; n : integer) return integer;
 
-- Assign the weight and bias memory address length of each layer:
-- Arguments:
-- NumIn : number of inputs of the network
-- NumN : number of neurons of each layer
-- n : number of layers (number of elements of NumN and the return integer vector)
-- Return: weight and bias memory address length of each layer (integer vector)
function assign_addrl(NumIn : integer; NumN : int_vector; n : integer) return int_vector;
 
-- Calculate the maximum of the multiplications of two vectors element by element
-- Arguments:
-- v1 : input vector 1
-- v2 : input vector 2
-- Return: maximum of the multiplications of two vectors element by element
function calculate_max_mul(v1 : int_vector; v2 : int_vector) return integer;
 
-- Returns the max value of the input integer vector:
function calculate_max(v : int_vector) return integer;
 
-- Adding needed functions from the deprecated library proc_common_v3_00_a:
function max2 (num1, num2 : integer) return integer;
function log2(x : natural) return integer;
 
end layers_pkg;
 
package body layers_pkg is
 
function max2 (num1, num2 : integer) return integer is
begin
if num1 >= num2 then
return num1;
else
return num2;
end if;
end function max2;
 
-- Function log2 -- returns number of bits needed to encode x choices
-- x = 0 returns 0
-- x = 1 returns 0
-- x = 2 returns 1
-- x = 4 returns 2, etc.
function log2(x : natural) return integer is
variable i : integer := 0;
variable val: integer := 1;
begin
if x = 0 then
return 0;
else
for j in 0 to 29 loop -- for loop for XST
if val >= x then null;
else
i := i+1;
val := val*2;
end if;
end loop;
-- Fix per CR520627 XST was ignoring this anyway and printing a
-- Warning in SRP file. This will get rid of the warning and not
-- impact simulation.
-- synthesis translate_off
assert val >= x
report "Function log2 received argument larger" &
" than its capability of 2^30. "
severity failure;
-- synthesis translate_on
return i;
end if;
end function log2;
 
 
function is_digit(c : character) return boolean is
begin
case c is
when '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' => return true;
when others => return false;
end case;
end is_digit;
 
 
-- Assign values to an integer vector from a string:
-- Arguments:
-- str_v : string to be converted
-- n : number of elements of the vector
-- Return: assigned integer vector
function assign_ints(str_v : string; n : integer) return int_vector is
variable i : integer := n-1; ---- element counter
variable d_power : integer := 1; -- decimal power
variable ret : int_vector(n-1 downto 0) := (others => 0); -- return value
begin
for c in str_v'length downto 1 loop -- read every character in str_v
if str_v(c) = ' ' then -- a space separates a new element
assert i > 0
report "Error in assign_ints: number of elements in string is greater than n."
severity error;
i := i -1; -- decrease element counter to start calculate a new element
d_power := 1; -- reset the decimal power to 1
else
assert is_digit(str_v(c)) -- assert the new character is a digit
report "Error in assign_ints: character " & str_v(c) & " is not a digit."
severity error;
-- add the value of the new character to the element calculation ( + ("<new_digit>" - "0") * d_power):
ret(i) := ret(i) + (character'pos(str_v(c))-character'pos('0'))*d_power;
d_power := d_power*10; -- increase the decimal power for the next digit
end if;
end loop;
assert i = 0
report "Error in assign_ints: number of elements in string is less than n."
severity error;
return ret;
end assign_ints;
 
-- Assign values to an activation function type vector from a string:
-- Arguments:
-- str_v : string to be converted
-- n : number of elements of the vector
-- Return: assigned activation function type vector
function assign_ftype(str_v : string; n : integer) return ftype_vector is
variable i : integer := 0; -- element counter
variable l : integer := 1; -- element length counter
variable ret : ftype_vector(n-1 downto 0) := (others => "linear"); -- return value
begin
for c in 1 to str_v'length loop -- read every character in str_v
if str_v(c) = ' ' then -- a space separates a new element
i := i +1; -- increase element counter to start calculate a new element
l := 1; -- reset element length counter
else
ret(i)(l) := str_v(c);
l := l +1; -- increase element length counter
end if;
end loop;
assert i = n-1
report "Error in assign_ftype: number of elements in string is less than n."
severity error;
return ret;
end assign_ftype;
 
-- Assign values to a layer type vector from a string:
-- Arguments:
-- str_v : string to be converted
-- n : number of elements of the vector
-- Return: assigned layer type vector
function assign_ltype(str_v : string; n : integer) return ltype_vector is
variable i : integer := 0; -- element counter
variable l : integer := 1; -- element length counter
variable ret : ltype_vector(n-1 downto 0) := (others => "SP"); -- return value
begin
for c in 1 to str_v'length loop
if str_v(c) = ' ' then -- a space separates a new element
i := i +1; -- increase element counter to start calculate a new element
l := 1; -- reset element length counter
else
assert str_v(c) = 'P' or str_v(c) = 'S'
report "Error in assign_ltype: character " & str_v(c) & " is not 'P' (parallel) or 'S' (serial)."
severity error;
ret(i)(l) := str_v(c);
l := l +1; -- increase element length counter
end if;
end loop;
assert i = n-1
report "Error in assign_ltype: number of elements do not coincide with number of introduced elements."
severity error;
return ret;
end assign_ltype;
 
-- Calculate the total weight and bias memory address length:
-- Arguments:
-- NumIn : number of inputs of the network
-- NumN : number of neurons of each layer
-- n : number of layers (number of elements of NumN)
-- Return: total weight and bias memory address length (integer)
function calculate_addr_l(NumIn : integer; NumN : int_vector; n : integer) return integer is -- matrix + b_sel
variable addr_l : integer := log2(NumIn)+log2(NumN(0)); -- return value. Initialized with the weight memory length of the first layer
begin
-- Calculate the maximum of the weight memory length:
for i in 1 to n-1 loop
addr_l := max2( addr_l, log2(NumN(i-1))+log2(NumN(i)) ); -- Note: a misplaced parenthesis here (log2 applied to the sum) would break the address calculation; the closing parenthesis belongs after NumN(i-1)
end loop;
addr_l := addr_l +1; -- add bias select bit
return addr_l;
end calculate_addr_l;
 
-- Base two logarithm for int_vector:
-- Arguments:
-- v : integer vector
-- n : number of elements of the vector
-- Return : integer vector of the base two logarithms of each element of v
function log2(v : int_vector; n : integer) return int_vector is
variable ret : int_vector(n-1 downto 0); -- return value
begin
-- for each element of v, calculate its base two logarithm:
for i in 0 to n-1 loop
ret(i) := log2(v(i));
end loop;
return ret;
end log2;
 
-- Assign the weight and bias memory address length of each layer:
-- Arguments:
-- NumIn : number of inputs of the network
-- NumN : number of neurons of each layer
-- n : number of layers (number of elements of NumN and the return integer vector)
-- Return: weight and bias memory address length of each layer (integer vector)
function assign_addrl(NumIn : integer; NumN : int_vector; n : integer) return int_vector is
variable ret : int_vector(n-1 downto 0); -- return value
begin
ret(0) := log2(NumIn)+log2(NumN(0)); -- Weight memory length of the first layer
for i in 1 to n-1 loop
ret(i) := log2(NumN(i-1))+log2(NumN(i));
end loop;
return ret;
end assign_addrl;
 
-- Returns the max value of the input integer vector:
function calculate_max(v : int_vector) return integer is
variable ac_max : integer := 0; -- return value
begin
for i in 0 to v'length-1 loop
ac_max := max2(ac_max,v(i));
end loop;
return ac_max;
end calculate_max;
 
-- Calculate the maximum of the multiplications of two vectors element by element
-- Arguments:
-- v1 : input vector 1
-- v2 : input vector 2
-- Return: maximum of the multiplications of two vectors element by element
function calculate_max_mul(v1 : int_vector; v2 : int_vector) return integer is
variable ac_max : integer := 0;
begin
assert v1'length = v2'length
report "Error in calculate_max_mul: vector's length do not coincide."
severity error;
for i in 0 to v1'length-1 loop
ac_max := max2(ac_max,v1(i)*v2(i));
end loop;
return ac_max;
end calculate_max_mul;
 
end layers_pkg;
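
A minimal usage sketch of the string parsers (the *_demo names are hypothetical; the string values match support_pkg.vhd below). assign_ints scans the string right to left, so the leftmost element lands in index 0:

library IEEE;
use IEEE.STD_LOGIC_1164.all;
use work.layers_pkg.all;

entity parse_demo is
end parse_demo;

architecture sim of parse_demo is
-- "2 3 1" parses so that index 0 holds the leftmost element:
constant NumN_demo : int_vector(2 downto 0) := assign_ints("2 3 1", 3);
-- NumN_demo(0) = 2, NumN_demo(1) = 3, NumN_demo(2) = 1
constant lt_demo : ltype_vector(2 downto 0) := assign_ltype("SP PS SP", 3);
-- Per-layer weight-memory address lengths for a 1-input network:
constant addrl_demo : int_vector(2 downto 0) := assign_addrl(1, NumN_demo, 3);
-- addrl_demo = (0 => 1, 1 => 3, 2 => 2), since log2 returns the bits needed to encode x choices
begin
end sim;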
/artificial_neural_network/trunk/ANN_kernel/RTL_VHDL_files/support_pkg.vhd
0,0 → 1,37
library IEEE;
use IEEE.STD_LOGIC_1164.all;
use IEEE.numeric_std.all;
use work.layers_pkg.all;
package support_pkg is
 
-- generic constants:
 
constant NbitIn : natural := 12;
constant LSB_In : natural := 8;
constant Nbit : natural := 12;
constant NbitW : natural := 24;
constant LSB_OUT : natural := 8;
constant Nlayer : natural := 3;
 
constant NbitOut : integer := 12 ;
constant NumIn : integer := 1;
constant NumN : int_vector(Nlayer-1 downto 0) := assign_ints("2 3 1",Nlayer);
constant LSbit : int_vector(Nlayer-1 downto 0) := assign_ints("8 8 8",Nlayer);
constant NbitO : int_vector(Nlayer-1 downto 0) := assign_ints("12 12 12",Nlayer);
constant l_type : string := "SP PS SP"; -- Layer type of each layer
constant f_type : string := "siglu2 siglu2 siglu2"; -- Activation function type of each layer
 
function real2stdlv (bitW : natural; din : real) return std_logic_vector;
 
end support_pkg;
 
package body support_pkg is
 
function real2stdlv (bitW : natural; din : real) return std_logic_vector is
variable vres : signed(bitW-1 downto 0) := (others => '0');
begin -- real2stdlv
vres:= to_signed(integer(din*(2.0**(LSB_OUT))), bitW);
return std_logic_vector(vres);
end real2stdlv;
 
end support_pkg;
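
A quick check of real2stdlv against the constants above (NbitW = 24, LSB_OUT = 8); the c_* names and example values are illustrative only, not from the source:

constant c_half : std_logic_vector(NbitW-1 downto 0) := real2stdlv(NbitW, 0.5);
-- = std_logic_vector(to_signed(128, 24)), since 0.5 * 2.0**8 = 128.0
constant c_w : std_logic_vector(NbitW-1 downto 0) := real2stdlv(NbitW, -2.66);
-- = std_logic_vector(to_signed(-681, 24)), since integer(-2.66 * 256.0) rounds to -681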
/artificial_neural_network/trunk/ANN_kernel/RTL_VHDL_files/wb_init.vhd
0,0 → 1,72
library ieee;
use ieee.std_logic_1164.all;
use ieee.numeric_std.all;
library work;
use work.support_pkg.all;
use work.layers_pkg.all;
package wb_init is
type ramd_type is array (3 downto 0) of std_logic_vector(NbitW-1 downto 0);
type layer_ram is array (3 downto 0) of ramd_type;
type w_ram is array (integer range <>) of layer_ram;
type b_type is array (integer range <>) of ramd_type;
constant w_init : w_ram :=
(
0 => (
0 => (
0 => real2stdlv(NbitW,-0.8964),
1 => real2stdlv(NbitW,-2.6600),
others =>(others => '0')
),
others=>(others =>(others => '0'))
),
1 => (
0 => (
0 => real2stdlv(NbitW,-5.6056),
1 => real2stdlv(NbitW,-1.5274),
2 => real2stdlv(NbitW,-8.4909),
others =>(others => '0')
),
1 => (
0 => real2stdlv(NbitW,1.0885),
1 => real2stdlv(NbitW,0.7244),
2 => real2stdlv(NbitW,3.8977),
others =>(others => '0')
),
others=>(others =>(others => '0'))
),
2 => (
0 => (
0 => real2stdlv(NbitW,6.0449),
others =>(others => '0')
),
1 => (
0 => real2stdlv(NbitW,-2.8724),
others =>(others => '0')
),
2 => (
0 => real2stdlv(NbitW,-5.0188),
others =>(others => '0')
),
others=>(others =>(others => '0'))
)
);
 
constant b_init : b_type :=
(
0 => (
0 => real2stdlv(NbitW,(2.0**LSB_OUT)*(0.3704)),
1 => real2stdlv(NbitW,(2.0**LSB_OUT)*(0.7149)),
others =>(others => '0')
),
1 => (
0 => real2stdlv(NbitW,(2.0**LSB_OUT)*(2.8121)),
1 => real2stdlv(NbitW,(2.0**LSB_OUT)*(0.3690)),
2 => real2stdlv(NbitW,(2.0**LSB_OUT)*(2.4685)),
others =>(others => '0')
),
2 => (
0 => real2stdlv(NbitW,(2.0**LSB_OUT)*(0.0784)),
others =>(others => '0')
)
);
end wb_init;
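
Note the biases feed an extra (2.0**LSB_OUT) factor into real2stdlv while the weights do not. A plausible reading, inferred from the arithmetic rather than stated anywhere in the source: each weight-by-input product inside the MAC carries two fractional scalings, so the biases are pre-shifted to the same scale before accumulation:

$(w \cdot 2^{LSB})\,(x \cdot 2^{LSB}) = w\,x \cdot 2^{2 \cdot LSB} \;\Rightarrow\; b_{fixed} = \mathrm{round}\big(b \cdot 2^{2 \cdot LSB}\big), \qquad LSB = \mathrm{LSB\_OUT} = 8$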
/artificial_neural_network/trunk/test_bench/data_in.txt
0,0 → 1,500
-1.000000
-0.979798
-0.959596
-0.939394
-0.919192
-0.898990
-0.878788
-0.858586
-0.838384
-0.818182
-0.797980
-0.777778
-0.757576
-0.737374
-0.717172
-0.696970
-0.676768
-0.656566
-0.636364
-0.616162
-0.595960
-0.575758
-0.555556
-0.535354
-0.515152
-0.494949
-0.474747
-0.454545
-0.434343
-0.414141
-0.393939
-0.373737
-0.353535
-0.333333
-0.313131
-0.292929
-0.272727
-0.252525
-0.232323
-0.212121
-0.191919
-0.171717
-0.151515
-0.131313
-0.111111
-0.090909
-0.070707
-0.050505
-0.030303
-0.010101
0.010101
0.030303
0.050505
0.070707
0.090909
0.111111
0.131313
0.151515
0.171717
0.191919
0.212121
0.232323
0.252525
0.272727
0.292929
0.313131
0.333333
0.353535
0.373737
0.393939
0.414141
0.434343
0.454545
0.474747
0.494949
0.515152
0.535354
0.555556
0.575758
0.595960
0.616162
0.636364
0.656566
0.676768
0.696970
0.717172
0.737374
0.757576
0.777778
0.797980
0.818182
0.838384
0.858586
0.878788
0.898990
0.919192
0.939394
0.959596
0.979798
1.000000
[... the 100-sample ramp above (-1.000000 to 1.000000) repeats four more times verbatim; 500 lines in total ...]
/artificial_neural_network/trunk/test_bench/data_out_oct.txt
0,0 → 1,500
-0.045413
-0.086976
-0.132375
-0.181645
-0.234671
-0.291146
-0.350528
-0.412014
-0.474546
-0.536847
-0.597512
-0.655131
-0.708433
-0.756422
-0.798475
-0.834366
-0.864242
-0.888536
-0.907862
-0.922914
-0.934374
-0.942856
-0.948870
-0.952808
-0.954940
-0.955417
-0.954272
-0.951424
-0.946672
-0.939696
-0.930051
-0.917178
-0.900424
-0.879088
-0.852495
-0.820091
-0.781559
-0.736907
-0.686535
-0.631217
-0.572029
-0.510212
-0.447014
-0.383550
-0.320690
-0.259014
-0.198798
-0.140051
-0.082562
-0.025961
0.030215
0.086466
0.143270
0.201033
0.260034
0.320375
0.381939
0.444344
0.506933
0.568776
0.628723
0.685510
0.737899
0.784843
0.825634
0.859987
0.888046
0.910305
0.927484
0.940390
0.949802
0.956397
0.960706
0.963106
0.963823
0.962938
0.960390
0.955988
0.949401
0.940174
0.927737
0.911448
0.890652
0.864768
0.833385
0.796344
0.753794
0.706192
0.654243
0.598824
0.540875
0.481312
0.420968
0.360552
0.300648
0.241715
0.184112
0.128111
0.073920
0.021691
-0.045413
-0.086976
-0.132375
-0.181645
-0.234671
-0.291146
-0.350528
-0.412014
-0.474546
-0.536847
-0.597512
-0.655131
-0.708433
-0.756422
-0.798475
-0.834366
-0.864242
-0.888536
-0.907862
-0.922914
-0.934374
-0.942856
-0.948870
-0.952808
-0.954940
-0.955417
-0.954272
-0.951424
-0.946672
-0.939696
-0.930051
-0.917178
-0.900424
-0.879088
-0.852495
-0.820091
-0.781559
-0.736907
-0.686535
-0.631217
-0.572029
-0.510212
-0.447014
-0.383550
-0.320690
-0.259014
-0.198798
-0.140051
-0.082562
-0.025961
0.030215
0.086466
0.143270
0.201033
0.260034
0.320375
0.381939
0.444344
0.506933
0.568776
0.628723
0.685510
0.737899
0.784843
0.825634
0.859987
0.888046
0.910305
0.927484
0.940390
0.949802
0.956397
0.960706
0.963106
0.963823
0.962938
0.960390
0.955988
0.949401
0.940174
0.927737
0.911448
0.890652
0.864768
0.833385
0.796344
0.753794
0.706192
0.654243
0.598824
0.540875
0.481312
0.420968
0.360552
0.300648
0.241715
0.184112
0.128111
0.073920
0.021691
-0.045413
-0.086976
-0.132375
-0.181645
-0.234671
-0.291146
-0.350528
-0.412014
-0.474546
-0.536847
-0.597512
-0.655131
-0.708433
-0.756422
-0.798475
-0.834366
-0.864242
-0.888536
-0.907862
-0.922914
-0.934374
-0.942856
-0.948870
-0.952808
-0.954940
-0.955417
-0.954272
-0.951424
-0.946672
-0.939696
-0.930051
-0.917178
-0.900424
-0.879088
-0.852495
-0.820091
-0.781559
-0.736907
-0.686535
-0.631217
-0.572029
-0.510212
-0.447014
-0.383550
-0.320690
-0.259014
-0.198798
-0.140051
-0.082562
-0.025961
0.030215
0.086466
0.143270
0.201033
0.260034
0.320375
0.381939
0.444344
0.506933
0.568776
0.628723
0.685510
0.737899
0.784843
0.825634
0.859987
0.888046
0.910305
0.927484
0.940390
0.949802
0.956397
0.960706
0.963106
0.963823
0.962938
0.960390
0.955988
0.949401
0.940174
0.927737
0.911448
0.890652
0.864768
0.833385
0.796344
0.753794
0.706192
0.654243
0.598824
0.540875
0.481312
0.420968
0.360552
0.300648
0.241715
0.184112
0.128111
0.073920
0.021691
-0.045413
-0.086976
-0.132375
-0.181645
-0.234671
-0.291146
-0.350528
-0.412014
-0.474546
-0.536847
-0.597512
-0.655131
-0.708433
-0.756422
-0.798475
-0.834366
-0.864242
-0.888536
-0.907862
-0.922914
-0.934374
-0.942856
-0.948870
-0.952808
-0.954940
-0.955417
-0.954272
-0.951424
-0.946672
-0.939696
-0.930051
-0.917178
-0.900424
-0.879088
-0.852495
-0.820091
-0.781559
-0.736907
-0.686535
-0.631217
-0.572029
-0.510212
-0.447014
-0.383550
-0.320690
-0.259014
-0.198798
-0.140051
-0.082562
-0.025961
0.030215
0.086466
0.143270
0.201033
0.260034
0.320375
0.381939
0.444344
0.506933
0.568776
0.628723
0.685510
0.737899
0.784843
0.825634
0.859987
0.888046
0.910305
0.927484
0.940390
0.949802
0.956397
0.960706
0.963106
0.963823
0.962938
0.960390
0.955988
0.949401
0.940174
0.927737
0.911448
0.890652
0.864768
0.833385
0.796344
0.753794
0.706192
0.654243
0.598824
0.540875
0.481312
0.420968
0.360552
0.300648
0.241715
0.184112
0.128111
0.073920
0.021691
-0.045413
-0.086976
-0.132375
-0.181645
-0.234671
-0.291146
-0.350528
-0.412014
-0.474546
-0.536847
-0.597512
-0.655131
-0.708433
-0.756422
-0.798475
-0.834366
-0.864242
-0.888536
-0.907862
-0.922914
-0.934374
-0.942856
-0.948870
-0.952808
-0.954940
-0.955417
-0.954272
-0.951424
-0.946672
-0.939696
-0.930051
-0.917178
-0.900424
-0.879088
-0.852495
-0.820091
-0.781559
-0.736907
-0.686535
-0.631217
-0.572029
-0.510212
-0.447014
-0.383550
-0.320690
-0.259014
-0.198798
-0.140051
-0.082562
-0.025961
0.030215
0.086466
0.143270
0.201033
0.260034
0.320375
0.381939
0.444344
0.506933
0.568776
0.628723
0.685510
0.737899
0.784843
0.825634
0.859987
0.888046
0.910305
0.927484
0.940390
0.949802
0.956397
0.960706
0.963106
0.963823
0.962938
0.960390
0.955988
0.949401
0.940174
0.927737
0.911448
0.890652
0.864768
0.833385
0.796344
0.753794
0.706192
0.654243
0.598824
0.540875
0.481312
0.420968
0.360552
0.300648
0.241715
0.184112
0.128111
0.073920
0.021691
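
The integer samples in data_out_tb.txt below are the fixed-point counterparts of floating-point reference values like the ones above: show_res.m compares the two after dividing the test-bench output by 256 (a 2^8 output scaling), so each integer is approximately round(256*y). A minimal Octave sanity check, assuming both files exist as produced by run.sh:

% Hypothetical check, not part of the repository:
y_oct = load('data_out_oct.txt');       % floating-point reference from Octave
y_tb  = load('data_out_tb.txt') / 256;  % test-bench output, rescaled by 2^-8
n = min(length(y_oct), length(y_tb));
printf('max abs difference: %f\n', max(abs(y_oct(1:n) - y_tb(1:n))));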
/artificial_neural_network/trunk/test_bench/data_out_tb.txt
0,0 → 1,495
-11
-18
-41
-40
-62
-74
-86
-112
-123
-133
-154
-173
-181
-196
-205
-212
-223
-226
-233
-235
-238
-241
-243
-243
-245
-244
-243
-243
-242
-241
-238
-235
-231
-224
-217
-208
-199
-187
-170
-159
-143
-128
-113
-96
-78
-68
-53
-33
-23
 
10
23
43
54
67
80
105
109
129
141
161
178
189
204
211
221
227
235
236
241
243
244
247
246
246
246
246
243
242
237
239
236
227
222
211
203
192
181
167
153
139
125
109
92
77
59
49
30
19
5
-11
-18
-41
-40
-62
-74
-86
-112
-123
-133
-154
-173
-181
-196
-205
-212
-223
-226
-233
-235
-238
-241
-243
-243
-245
-244
-243
-243
-242
-241
-238
-235
-231
-224
-217
-208
-199
-187
-170
-159
-143
-128
-113
-96
-78
-68
-53
-33
-23
 
10
23
43
54
67
80
105
109
129
141
161
178
189
204
211
221
227
235
236
241
243
244
247
246
246
246
246
243
242
237
239
236
227
222
211
203
192
181
167
153
139
125
109
92
77
59
49
30
19
5
-11
-18
-41
-40
-62
-74
-86
-112
-123
-133
-154
-173
-181
-196
-205
-212
-223
-226
-233
-235
-238
-241
-243
-243
-245
-244
-243
-243
-242
-241
-238
-235
-231
-224
-217
-208
-199
-187
-170
-159
-143
-128
-113
-96
-78
-68
-53
-33
-23
 
10
23
43
54
67
80
105
109
129
141
161
178
189
204
211
221
227
235
236
241
243
244
247
246
246
246
246
243
242
237
239
236
227
222
211
203
192
181
167
153
139
125
109
92
77
59
49
30
19
5
-11
-18
-41
-40
-62
-74
-86
-112
-123
-133
-154
-173
-181
-196
-205
-212
-223
-226
-233
-235
-238
-241
-243
-243
-245
-244
-243
-243
-242
-241
-238
-235
-231
-224
-217
-208
-199
-187
-170
-159
-143
-128
-113
-96
-78
-68
-53
-33
-23
 
10
23
43
54
67
80
105
109
129
141
161
178
189
204
211
221
227
235
236
241
243
244
247
246
246
246
246
243
242
237
239
236
227
222
211
203
192
181
167
153
139
125
109
92
77
59
49
30
19
5
-11
-18
-41
-40
-62
-74
-86
-112
-123
-133
-154
-173
-181
-196
-205
-212
-223
-226
-233
-235
-238
-241
-243
-243
-245
-244
-243
-243
-242
-241
-238
-235
-231
-224
-217
-208
-199
-187
-170
-159
-143
-128
-113
-96
-78
-68
-53
-33
-23
 
10
23
43
54
67
80
105
109
129
141
161
178
189
204
211
221
227
235
236
241
243
244
247
246
246
246
246
243
242
237
239
236
227
222
211
203
192
181
167
153
139
125
109
92
77
/artificial_neural_network/trunk/test_bench/makefile
0,0 → 1,40
SRC_KER_DIR = ../ANN_kernel/RTL_VHDL_files
SRC_TB_DIR = ./src
VHDLS = \
	${SRC_KER_DIR}/layers_pkg.vhd \
	${SRC_KER_DIR}/support_pkg.vhd \
	${SRC_KER_DIR}/wb_init.vhd \
	${SRC_KER_DIR}/mac.vhd \
	${SRC_KER_DIR}/af_sigmoid2.vhd \
	${SRC_KER_DIR}/activation_function.vhd \
	${SRC_KER_DIR}/shiftreg_pl.vhd \
	${SRC_KER_DIR}/shiftreg_pu.vhd \
	${SRC_KER_DIR}/adder_tree.vhd \
	${SRC_KER_DIR}/layerPS.vhd \
	${SRC_KER_DIR}/layerPS_top.vhd \
	${SRC_KER_DIR}/layerSP.vhd \
	${SRC_KER_DIR}/layerSP_top.vhd \
	${SRC_KER_DIR}/ann.vhd \
	${SRC_TB_DIR}/ann_tb.vhd

# STD=standard
STD=synopsys
VSTD=93c
# VSTD=08
ENTITY=ann_tb
#RUN_OPTIONS= --stop-time=1000ns --wave=${ENTITY}.ghw
RUN_OPTIONS= --wave=${ENTITY}.ghw

# note: create the comp/ work directory (mkdir -p comp) before the first run;
# GHDL's --workdir option expects it to exist

all: ${ENTITY}.ghw

no_ghw: ${ENTITY}
	./${ENTITY}

reader: ${ENTITY} ${ENTITY}.ghw
	gtkwave ${ENTITY}.ghw ${ENTITY}.sav

${ENTITY}: ${VHDLS}
	ghdl -a -g --mb-comments --workdir=comp --std=${VSTD} ${VHDLS}
	ghdl -e -g --mb-comments --workdir=comp --std=${VSTD} -fexplicit --ieee=${STD} ${ENTITY}

${ENTITY}.ghw: ${ENTITY}
	./${ENTITY} ${RUN_OPTIONS}

clean:
	rm -f comp/* *.o *.vcd *.ghw events* ${ENTITY}
/artificial_neural_network/trunk/test_bench/octave/getwbc.m
0,0 → 1,15
function [wb, N_layers] = getwbc(NET)
% Collect the neural network's weights and biases into a cell array:
% wb{i,1} holds the weight matrix of layer i, wb{i,2} its bias vector.

N_layers = NET.numLayers;

wb = cell(N_layers,2);

wb{1,1} = cell2mat(NET.IW(1,1));
wb{1,2} = cell2mat(NET.b(1));

for(i=2:N_layers)
  wb{i,1} = cell2mat(NET.LW(i,i-1));
  wb{i,2} = cell2mat(NET.b(i));
end;
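
A short usage sketch, assuming a network NET trained with Octave's nnet package (the variable names here are illustrative, not part of the repository):

% wb{i,1} is the weight matrix of layer i, wb{i,2} its bias vector
[wb, NL] = getwbc(NET);
W1 = wb{1,1};  % weights feeding layer 1 (from the network inputs)
b1 = wb{1,2};  % biases of layer 1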
/artificial_neural_network/trunk/test_bench/octave/max_dim.m
0,0 → 1,6
function N = max_dim(CA)
% Return the largest dimension found among the weight matrices stored
% in column 1 of cell array CA (the maximum neuron/input count of any layer)
N = 0;
for (i=1:size(CA,1))
  N = max(N, max(size(cell2mat(CA(i,1)))));
end;
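
wb_gen.m uses max_dim to size the generated VHDL RAM arrays. An illustrative call, assuming wb came from getwbc above (the result depends on the trained network):

% With layer sizes [2 3 1] and one network input, the largest
% weight-matrix dimension is 3, so this would return 3:
N = max_dim(wb);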
/artificial_neural_network/trunk/test_bench/octave/nn_ex.m
0,0 → 1,31
% Training set: 10000 random points in [-1,1] with sine targets
x_tr = 2*rand(1,10000)-1;
y_tr = sin(pi*x_tr);

% Input range for newff
PR = zeros(1,2);
PR(1,1) = min(x_tr);
PR(1,2) = max(x_tr);

% Layer sizes
SS = [2 3 1];

NET = newff (PR,SS,{"tansig" "tansig" "tansig"},"trainlm","learngdm","mse");

NET.trainParam.min_grad = 0;
NET.trainParam.epochs = 150;
NET = train(NET,x_tr,y_tr);

% Validation set: five copies of a 100-point sweep over [-1,1]
x_val = linspace(-1,1,100);
x_val = [x_val x_val x_val x_val x_val];
y_val = sim(NET, x_val);

plot(y_val,'.');

% Generate the VHDL weight/bias initialization package
wb_gen(NET);
% system('mv wb_init.vhd ../src/wb_init.vhd')

% Write the stimuli and the floating-point reference output
fid = fopen('../data_in.txt','w');
fprintf(fid,'%f\n',x_val);
fclose(fid);
fid = fopen('../data_out_oct.txt','w');
fprintf(fid,'%f\n',y_val);
fclose(fid);
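
Because x_val is five copies of the same 100-point sweep, data_out_oct.txt (and hence the floating-point column earlier in this comparison) repeats with period 100. A quick check, assuming the file has just been written by this script:

% Hypothetical check of the period-100 repetition:
y = load('../data_out_oct.txt');
printf('period-100 repeat error: %g\n', max(abs(y(1:100) - y(101:200))));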
/artificial_neural_network/trunk/test_bench/octave/wb_gen.m
0,0 → 1,117
function wb_gen(NET)
 
[wbMat NLayers] = getwbc(NET);
MaxNeuronCnt = max_dim(wbMat);
 
fileid = fopen ('wb_init.vhd','w');
 
% addr weights:
% |_ _| |0| |_ _ _ _ _| |_ _ _ _ _ _|
% layer bias neuron input
% *2^12+ 0 + *2^5 + *2^0
% addr biases:
% |_ _| |1| |0 0 0 0 0 0| |_ _ _ _ _|
% layer bias neuron
% *2^12+ 2^11 + 0 + *2^0
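% Worked example of the layout above (illustrative values):
%   weight addr(layer k, neuron n, input j) = k*2^12 + n*2^5 + j
%     e.g. layer 1, neuron 2, input 3 -> 4096 + 64 + 3 = 4163
%   bias addr(layer k, neuron n) = k*2^12 + 2^11 + n
%     e.g. layer 1, neuron 2 -> 4096 + 2048 + 2 = 6146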
 
fprintf(fileid,'library ieee;\n');
fprintf(fileid,'use ieee.std_logic_1164.all;\n');
fprintf(fileid,'use ieee.numeric_std.all;\n');
fprintf(fileid,'library work;\n');
fprintf(fileid,'use work.support_pkg.all;\n');
fprintf(fileid,'use work.layers_pkg.all;\n');
fprintf(fileid,'package wb_init is\n');
 
fprintf(fileid,' type ramd_type is array (%i downto 0) of std_logic_vector(NbitW-1 downto 0);\n',MaxNeuronCnt);
fprintf(fileid,' type layer_ram is array (%i downto 0) of ramd_type;\n',MaxNeuronCnt);
fprintf(fileid,' type w_ram is array (integer range <>) of layer_ram;\n');
fprintf(fileid,' type b_type is array (integer range <>) of ramd_type;\n');
 
fprintf(fileid,' constant w_init : w_ram :=\n');
fprintf(fileid,' (\n');
for(k=1:NLayers)
fprintf(fileid,' %i => (\n',k-1);
for(i=1:size(cell2mat(wbMat(k,1)),2)) % neurons
fprintf(fileid,' %i => (\n',i-1);
for(j=1:size(cell2mat(wbMat(k,1)),1)) % inputs
fprintf(fileid,' %i => real2stdlv(NbitW,%1.4f)',j-1, cell2mat(wbMat(k,1))(j,i));
if j != size(cell2mat(wbMat(k,1)),1)
fprintf(fileid,',\n');
else
fprintf(fileid,',\n others =>(others => ''0'')\n');
end;
end;
if i != size(cell2mat(wbMat(k,1)),2)
fprintf(fileid,' ),\n');
else
fprintf(fileid,' ),\n others=>(others =>(others => ''0''))\n');
end;
end;
if k != NLayers
fprintf(fileid,' ),\n');
else
fprintf(fileid,' )\n');
end;
end;
fprintf(fileid,' );\n\n');
 
fprintf(fileid,' constant b_init : b_type :=\n');
fprintf(fileid,' (\n');
for(k=1:NLayers)
fprintf(fileid,' %i => (\n',k-1);
for(j=1:length(cell2mat(wbMat(k,2)))) % neurons (one bias per neuron)
fprintf(fileid,' %i => real2stdlv(NbitW,(2.0**LSB_OUT)*(%1.4f))',j-1, cell2mat(wbMat(k,2))(j));
if j != length(cell2mat(wbMat(k,2)))
fprintf(fileid,',\n');
else
fprintf(fileid,',\n others =>(others => ''0'')\n');
end;
end;
if k != NLayers
fprintf(fileid,' ),\n');
else
fprintf(fileid,' )\n');
end;
end;
fprintf(fileid,' );\n');
 
 
% fprintf(fileid,' constant b0_init : ramd_type0 :=\n');
% fprintf(fileid,' (\n');
% for(i=1:length(nn_data.b1))
% fprintf(fileid,' %i => real2stdlv(NbitW,(2.0**LSB_OUT)*(%.4f))',i-1,nn_data.b1(i));
% if i != length(nn_data.b1)
% fprintf(fileid,',\n');
% else
% fprintf(fileid,'\n');
% end;
 
% end;
% fprintf(fileid,' );\n');
 
% fprintf(fileid,' constant b1_init : ramd_type1 :=\n');
% fprintf(fileid,' (\n');
% for(i=1:length(nn_data.b2))
% fprintf(fileid,' %i => real2stdlv(NbitW,(2.0**LSB_OUT)*(%.4f))',i-1,nn_data.b2(i));
% if i != length(nn_data.b2)
% fprintf(fileid,',\n');
% else
% fprintf(fileid,'\n');
% end;
% end;
% fprintf(fileid,' );\n');
 
% fprintf(fileid,' constant b2_init : ramd_type2 :=\n');
% fprintf(fileid,' (\n');
% for(i=1:length(nn_data.b3))
% fprintf(fileid,' %i => real2stdlv(NbitW,(2.0**LSB_OUT)*(%.4f))',i-1,nn_data.b3(i));
% if i != length(nn_data.b3)
% fprintf(fileid,',\n');
% else
% fprintf(fileid,'\n');
% end;
% end;
% fprintf(fileid,' );\n');
 
fprintf(fileid,'end wb_init;\n');
fclose(fileid);
/artificial_neural_network/trunk/test_bench/run.sh
0,0 → 1,11
#!/bin/bash

echo "neural network training and wb_init.vhd file generation..."
cd octave
octave nn_ex.m
mv wb_init.vhd ../../ANN_kernel/RTL_VHDL_files/wb_init.vhd
cd ..
echo "GHDL simulation"
make
echo "results from octave vs results from GHDL"
octave show_res.m
artificial_neural_network/trunk/test_bench/run.sh
Property changes: Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property

/artificial_neural_network/trunk/test_bench/show_res.m
0,0 → 1,11
y_tb = load('data_out_tb.txt');
plot(y_tb./256,'b');
hold on;
y_oct = load('data_out_oct.txt');
plot(y_oct,'r');
title('results from octave vs results from ghdl test bench')
legend({"results from ghdl","results from octave"})

disp('press any key to continue');

pause;
/artificial_neural_network/trunk/test_bench/src/ann_tb.vhd
0,0 → 1,172
-------------------------------------------------------------------------------
-- Title : Testbench for design "ann". XOR solving neural network.
-- Project :
-------------------------------------------------------------------------------
-- File : ann_tb.vhd
-- Author : Jurek Stefanowicz
-- Company :
-- Created : 2016-09-30
-- Last update: 2016-09-30
-- Platform :
-- Standard : VHDL'87
-------------------------------------------------------------------------------
-- Description:
-------------------------------------------------------------------------------
-- Copyright (c) 2016
-------------------------------------------------------------------------------

library ieee;
use ieee.std_logic_1164.all;
use ieee.numeric_std.all;
use ieee.math_real.all;

library std;
use std.textio.all;

library work;
use work.layers_pkg.all;
use work.support_pkg.all;

-------------------------------------------------------------------------------

entity ann_tb is
end ann_tb;

-------------------------------------------------------------------------------

architecture beh1 of ann_tb is

  -- testbench signals
  signal end_sim : boolean := false;

  file data_out : text open write_mode is "data_out_tb.txt";
  file data_in : text open read_mode is "data_in.txt";

  signal reset : std_logic := '1';
  signal clk : std_logic := '1';

  -- ann input signals
  signal run_in : std_logic := '0'; -- Start and input data validation
  signal inputs : std_logic_vector(Nbit-1 downto 0) := (others => '0'); -- Input data

  -- weight & bias memory interface (not used)
  signal wdata : std_logic_vector(NbitW-1 downto 0) := (others => '0'); -- Weight and bias memory write data
  signal addr : std_logic_vector((calculate_addr_l(NumIn, NumN, Nlayer)+log2(Nlayer))-1 downto 0) := (others => '0'); -- Weight and bias memory address
  signal m_en : std_logic := '0'; -- Weight and bias memory enable (external interface)
  signal m_we : std_logic_vector(((NbitW+7)/8)-1 downto 0) := (others => '0');

  signal run_out : std_logic; -- Output data validation
  signal rdata : std_logic_vector(NbitW-1 downto 0); -- Weight and bias memory read data
  signal outputs : std_logic_vector(Nbit-1 downto 0); -- Output data

begin

  -- component instantiation
  ann0 : entity work.ann
    generic map (
      WBinit => true,
      Nlayer => Nlayer,
      NbitW => NbitW,
      NumIn => NumIn,
      NbitIn => Nbit,
      NumN => NumN,
      l_type => l_type,
      f_type => f_type,
      LSbit => LSbit,
      NbitO => NbitO,
      NbitOut => NbitOut)
    port map (
      -- in
      reset => reset,
      clk => clk,
      run_in => run_in,
      m_en => m_en,
      m_we => m_we,
      inputs => inputs,
      wdata => wdata,
      addr => addr,
      -- out
      run_out => run_out,
      rdata => rdata,
      outputs => outputs
    );

  -- clock generation
  Clk <= not Clk after 10 ns when end_sim = false else '0';

  -- xor weights:
  -- layer0:                        addresses:
  -- weights:                       layer bias neuron input
  --   neuron 1, input 1 : -3.7596    0    0    0     0
  --   neuron 1, input 2 :  3.0396    0    0    0     1
  --   neuron 2, input 1 :  2.3740    0    0    1     0
  --   neuron 2, input 2 : -2.1895    0    0    1     1
  -- bias:                           layer bias  x    neuron
  --   neuron 1 : 2.20762              0    1    0    0
  --   neuron 2 : 0.96043              0    1    0    1
  -- layer1:
  -- weights:                       layer bias neuron input
  --   neuron 1, input 1 : -2.2381    1    0    0     0
  --   neuron 1, input 2 : -2.2888    1    0    0     1
  -- bias:                           layer bias  x    neuron
  --   neuron 1 : 3.8896               1    1    0    0

  -- Write every valid network output as a signed integer, one per line
  DataSave : process(Clk)
    variable my_line : line; -- type 'line' comes from textio
  begin
    if (Clk'event and Clk = '1') then
      if (run_out = '1') then
        write(my_line, to_integer(signed(outputs)));
        writeline(data_out, my_line);
      end if;
    end if;
  end process;

  -- Read stimuli from data_in.txt, convert to fixed point and drive the DUT
  DataLoad : process
    variable input_line : line;
    variable din : real;
  begin
    wait for 20 ns;
    reset <= '0';
    wait until clk = '0';
    wait until clk = '1';
    wait until clk = '0';
    wait until clk = '1';

    l1 : while not end_sim loop
      if not endfile(data_in) then
        readline(data_in, input_line);
        read(input_line, din);
      else
        end_sim <= true;
        exit l1;
      end if;
      run_in <= '1';
      inputs <= std_logic_vector(to_signed(integer(din*(2.0**LSB_In)),NbitIn));

      wait until clk = '0';
      wait until clk = '1';
      run_in <= '0';

      -- We wait 4 clock cycles between run_ins because
      -- the network has a maximum layer size of 3 neurons

      wait until clk = '0';
      wait until clk = '1';

      wait until clk = '0';
      wait until clk = '1';

      wait until clk = '0';
      wait until clk = '1';

      --wait until clk = '0';
      --wait until clk = '1';

    end loop l1;
    wait;
  end process;

end beh1;
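
In DataLoad above, each stimulus is scaled by 2**LSB_In and converted to a signed integer before driving the DUT. The same conversion in Octave, with LSB_In = 8 assumed purely for illustration (the real value comes from support_pkg):

% Hypothetical mirror of the test bench's input quantization:
LSB_In = 8;                       % assumed value, for illustration only
din    = 0.5;                     % one sample from data_in.txt
q      = round(din * 2^LSB_In);   % VHDL's integer() also rounds to nearest
printf('quantized input: %d\n', q);  % -> 128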
