OpenCores
URL https://opencores.org/ocsvn/artificial_neural_network/artificial_neural_network/trunk

Subversion Repositories artificial_neural_network

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /artificial_neural_network/trunk/ANN_kernel/RTL_VHDL_files
    from Rev 9 to Rev 10

Rev 9 → Rev 10

/activation_function.vhd
76,7 → 76,26
siglut_inst: entity work.af_sigmoid2
generic map
(
Nbit => Nbit,
Nbit => Nbit
)
port map
(
reset => reset,
clk => clk,
run_in => run_in,
inputs => inputs,
run_out => run_out,
outputs => outputs
);
end generate;
 
-- Example 3: sigmoid activation function implemented as a LUT, with a second different set of parameters:
sigmoid_mat:
if (f_type = "sigmat") generate
siglut_inst: entity work.af_sigmoid_mat
generic map
(
Nbit => Nbit,
lsbit => lsbit
)
port map
/af_sigmoid.vhd
46,7 → 46,7
-- Definition of internal modules, constants, signals, etc...
 
-- Sigmoid parameters:
constant f0 : real := 2.0; -- Slope at the origin
constant f0 : real := 1.0; -- Slope at the origin
constant fr : real := 2.0; -- fr = fmax - fmin
 
signal dataIn: integer range (2**Nbit-1) downto 0; -- To convert std_logic_vector input to integer index for the LUT
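For reference, f0 and fr feed the LUT-generation formula shared by all three sigmoid files, and a one-line check confirms the "slope at the origin" comment (so this revision halves the slope from 2.0 to 1.0):

$$f(x) = \frac{f_r}{1 + e^{-(4 f_0/f_r)\,x}} - \frac{f_r}{2}, \qquad f'(0) = f_r \cdot \frac{4 f_0/f_r}{4} = f_0, \qquad f(x) \in \left(-\tfrac{f_r}{2}, \tfrac{f_r}{2}\right)\ \text{so}\ f_r = f_{max} - f_{min}$$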
/af_sigmoid2.vhd
28,8 → 28,7
entity af_sigmoid2 is
generic
(
Nbit : natural := 8;
lsbit : natural := 10
Nbit : natural := 8
);
port
(
48,7 → 47,7
-- Definition of internal modules, constants, signals, etc...
 
-- Sigmoid parameters:
constant f0 : real := 1.0; -- Slope at the origin
constant f0 : real := 0.5; -- Slope at the origin
constant fr : real := 2.0; -- fr = fmax - fmin
 
signal dataIn: integer range (2**Nbit-1) downto 0; -- To convert std_logic_vector input to integer index for the LUT
55,8 → 54,8
type table_t is array(0 to (2**Nbit)-1) of std_logic_vector(Nbit-1 downto 0); -- LUT type
 
-- Function Sigmoidal: generates the Look-Up-Table for the sigmoid activation function:
-- margin: maximum value of input
function Sigmoidal(margin:real;Nbit:natural;lsbit:natural) return table_t is
-- margin: maximum value of x.
function Sigmoidal(margin:real;Nbit:natural) return table_t is
variable scale,x,y,w,t: real;
variable u: integer;
variable fbits: std_logic_vector(Nbit-1 downto 0);
63,10 → 62,10
variable table: table_t;
begin
scale := (2.0*margin)/(2.0**Nbit); -- Calculates the gap between two points
x := -margin;
for idx in -(2**(Nbit-1)) to (2**(Nbit-1))-1 loop
y := ( fr / (1.0+exp(((-4.0*f0)/fr)*x)) ) - (fr/2.0);
w := y*(2.0**(lsbit)); -- Shifts bits to the left
w := y*(2.0**(Nbit-1)); -- Shifts bits to the left
t := round(w);
u := integer(t);
fbits := std_logic_vector(to_signed(u,Nbit));
75,12 → 74,12
end loop;
return table;
end Sigmoidal;
signal Table: table_t := Sigmoidal(2.0**(Nbit-lsbit-1),Nbit,lsbit); -- Generation of the LUT (at synthesis time)
signal Table: table_t := Sigmoidal(1.0,Nbit); -- Generation of the LUT (at synthesis time)
 
begin
 
-- Description of the activation function
dataIn <= to_integer(unsigned(inputs));
dataIn <= to_integer(signed(inputs));
 
Activacion: process(clk,reset)
begin
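To see the re-parameterisation of af_sigmoid2 numerically, here is one worked table entry, assuming the default Nbit = 8 (so f0 = 0.5, fr = 2.0, margin = 1.0, output scale 2^(Nbit-1) = 128). With 4·f0/fr = 1 the curve reduces to a scaled tanh:

$$y = \frac{2}{1+e^{-x}} - 1 = \tanh\!\left(\frac{x}{2}\right), \qquad y(1.0) = \tanh(0.5) \approx 0.4621, \qquad w \approx 0.4621 \cdot 128 \approx 59.2 \;\Rightarrow\; u = 59$$

so over the input range (-1, +1) the stored codes span roughly -59 to +59 of the available -128..+127, and the LUT never saturates. Note also that to_integer(signed(inputs)) can go negative while dataIn is still declared with a 0 to 2**Nbit-1 range; simulators will flag that, so the signed index presumably needs the same two's-complement wrap used in af_sigmoid_mat below.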
/af_sigmoid_mat.vhd
0,0 → 1,101
----------------------------------------------------------------------------------
-- Company: CEI
-- Engineer: Enrique Herrero
--
-- Create Date:
-- Design Name: Configurable ANN
-- Module Name: af_sigmoid_mat - Behavioral
-- Project Name:
-- Target Devices:
-- Tool versions:
-- Description: Sigmoid activation function implemented as a Look-Up-Table (LUT).
-- Alternative set of parameters.
--
-- Dependencies:
--
-- Revision:
-- Revision 0.01 - File Created
-- Revision 1 - David Aledo
-- Additional Comments:
--
----------------------------------------------------------------------------------
library IEEE;
use IEEE.STD_LOGIC_1164.ALL;
use ieee.numeric_std.ALL;
use ieee.math_real.all;
 
 
entity af_sigmoid_mat is
generic
(
Nbit : natural := 8;
lsbit : natural := 10
);
port
(
reset : in std_logic;
clk : in std_logic;
run_in : in std_logic; -- Start signal and input data valid flag
inputs : in std_logic_vector(Nbit-1 downto 0); -- Input data
run_out : out std_logic; -- Output data valid flag; becomes run_in for the next layer
outputs : out std_logic_vector(Nbit-1 downto 0) -- Output data
);
end af_sigmoid_mat;
 
 
architecture Behavioral of af_sigmoid_mat is
 
-- Definition of internal modules, constants, signals, etc...
 
-- Sigmoid parameters:
constant f0 : real := 1.0; -- Slope at the origin
constant fr : real := 2.0; -- fr = fmax - fmin
 
signal dataIn: integer range (2**Nbit-1) downto 0; -- To convert std_logic_vector input to integer index for the LUT
type table_t is array(0 to (2**Nbit)-1) of std_logic_vector(Nbit-1 downto 0); -- LUT type
 
-- Function Sigmoidal: generates the Look-Up-Table for the sigmoid activation function:
-- margin: maximum value of input
function Sigmoidal(margin:real;Nbit:natural;lsbit:natural) return table_t is
variable scale,x,y,w,t: real;
variable u: integer;
variable fbits: std_logic_vector(Nbit-1 downto 0);
variable table: table_t;
begin
scale := (2.0*margin)/(2.0**Nbit); -- Calculates the gap between two points
x := -margin;
for idx in -(2**(Nbit-1)) to (2**(Nbit-1))-1 loop
y := ( fr / (1.0+exp(((-4.0*f0)/fr)*x)) ) - (fr/2.0);
w := y*(2.0**(lsbit)); -- Shifts bits to the left
t := round(w);
u := integer(t);
fbits := std_logic_vector(to_signed(u,Nbit));
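-- Adding 2**Nbit before the to_unsigned conversion (which truncates to
-- Nbit bits) maps the signed loop index to its two's-complement address:
-- idx >= 0 keeps its position, idx < 0 lands in the upper half of the table.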
table(to_integer(to_unsigned(idx+(2**Nbit),Nbit))):= fbits;
x := x+scale;
end loop;
return table;
end Sigmoidal;
signal Table: table_t := Sigmoidal(2.0**(Nbit-lsbit-1),Nbit,lsbit); -- Generation of the LUT (at synthesis time)
 
begin
 
-- Description of the activation function
dataIn <= to_integer(unsigned(inputs));
 
Activacion: process(clk,reset)
begin
if clk'event and clk = '1' then
if reset = '1' then
run_out <= '0';
outputs <= (others => '0');
else
if run_in = '1' then
run_out<='1';
outputs<=Table(dataIn); -- Assigns output value from the LUT
else
run_out<='0';
end if;
end if;
end if;
end process;
end Behavioral;
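The new file compiles stand-alone against ieee, so it is easy to exercise in simulation. Below is a minimal testbench sketch (not part of this revision; the testbench name and stimulus codes are invented for illustration, and the generics are the entity defaults):

library ieee;
use ieee.std_logic_1164.all;
use ieee.numeric_std.all;

entity tb_af_sigmoid_mat is
end tb_af_sigmoid_mat;

architecture sim of tb_af_sigmoid_mat is
  constant Nbit : natural := 8;
  signal reset : std_logic := '1';
  signal clk : std_logic := '0';
  signal run_in : std_logic := '0';
  signal inputs : std_logic_vector(Nbit-1 downto 0) := (others => '0');
  signal run_out : std_logic;
  signal outputs : std_logic_vector(Nbit-1 downto 0);
begin

  clk <= not clk after 5 ns; -- free-running 100 MHz clock

  uut: entity work.af_sigmoid_mat
  generic map (Nbit => Nbit, lsbit => 10) -- entity defaults
  port map
  (
    reset => reset, clk => clk, run_in => run_in,
    inputs => inputs, run_out => run_out, outputs => outputs
  );

  stim: process
  begin
    wait for 20 ns; -- hold the synchronous reset for two clocks
    reset <= '0';
    for i in 0 to 7 loop -- sweep eight LUT addresses, one per clock
      wait until rising_edge(clk);
      inputs <= std_logic_vector(to_unsigned(i*32, Nbit));
      run_in <= '1';
    end loop;
    wait until rising_edge(clk);
    run_in <= '0';
    wait; -- stimulus done; stop the simulator manually
  end process;

end sim;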
/layerPS_top.vhd
22,7 → 22,7
use IEEE.STD_LOGIC_1164.ALL;
use ieee.numeric_std.all;
 
library work;
use work.wb_init.all; -- initialization package, comment out when not used
-- Deprecated XPS library:
33,17 → 33,17
 
generic
(
WBinit : boolean := false;
LNum : natural := 0; ------- layer number (needed for initialization)
NumN : natural := 34; ------- Number of neurons of the layer
NumIn : natural := 27; ------- Number of inputs of each neuron
NbitIn : natural := 8; ------- Bit width of the input data
NbitW : natural := 1; ------- Bit width of weights and biases
NumN : natural := 64; ------- Number of neurons of the layer
NumIn : natural := 8; ------- Number of inputs of each neuron
NbitIn : natural := 12; ------- Bit width of the input data
NbitW : natural := 8; ------- Bit width of weights and biases
NbitOut : natural := 8; ------- Bit width of the output data
lra_l : natural := 11; ------- Layer RAM address length. It should equal log2(NumN)+log2(NumIn)
wra_l : natural := 5; ------- Weight RAM address length. It should equal log2(NumIn)
lra_l : natural := 10; ------- Layer RAM address length. It should equal log2(NumN)+log2(NumIn)
wra_l : natural := 3; ------- Weight RAM address length. It should equal log2(NumIn)
bra_l : natural := 6; ------- Bias RAM address length. It should equal log2(NumN)
LSbit : natural := 6 ------- Least significant bit of the outputs
LSbit : natural := 4; ------- Least significant bit of the outputs
WBinit : boolean := false;
LNum : natural := 0 ------- layer number (needed for initialization)
);
 
port
69,6 → 69,8
 
architecture Behavioral of layerPS_top is
 
--type ramd_type is array (pad_power2(NumN)-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces -- pad_power2() only for simulation
--type layer_ram is array (pad_power2(NumIn)-1 downto 0) of ramd_type;
type ramd_type is array (NumN-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces
type layer_ram is array (NumIn-1 downto 0) of ramd_type;
type outm_type is array (NumIn-1 downto 0) of std_logic_vector(NbitW-1 downto 0);
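A quick sanity check of the revised layerPS_top defaults against the formulas in the comments (assuming log2 means ceil(log2)): wra_l = ⌈log2 8⌉ = 3 and bra_l = ⌈log2 64⌉ = 6 match, while ⌈log2 64⌉ + ⌈log2 8⌉ = 9, so the default lra_l = 10 carries one spare address bit. The same arithmetic applies to the reshuffled layerSP_top defaults below (wra_l = ⌈log2 64⌉ = 6 and bra_l = ⌈log2 8⌉ = 3 match; lra_l = 10 vs. a computed 9).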
/layerSP_top.vhd
33,17 → 33,18
 
generic
(
NumN : natural := 8; ------- Number of neurons of the layer
NumIn : natural := 64; ------- Number of inputs of each neuron
NbitIn : natural := 8; ------- Bit width of the input data
NbitW : natural := 8; ------- Bit width of weights and biases
NbitOut : natural := 12; ------- Bit width of the output data
lra_l : natural := 10; ------- Layer RAM address length. It should equal log2(NumN)+log2(NumIn)
wra_l : natural := 6; ------- Weight RAM address length. It should equal log2(NumIn)
bra_l : natural := 3; ------- Bias RAM address length. It should equal log2(NumN)
LSbit : natural := 4; ------- Least significant bit of the outputs
WBinit : boolean := false;
LNum : natural := 0; ------- layer number (needed for initialization)
NumN : natural := 34; ------- Number of neurons of the layer
NumIn : natural := 27; ------- Number of inputs of each neuron
NbitIn : natural := 8; ------- Bit width of the input data
NbitW : natural := 32; ------- Bit width of weights and biases
NbitOut : natural := 8; ------- Bit width of the output data
lra_l : natural := 11; ------- Layer RAM address length. It should equal log2(NumN)+log2(NumIn)
wra_l : natural := 5; ------- Weight RAM address length. It should equal log2(NumIn)
bra_l : natural := 6; ------- Bias RAM address length. It should equal log2(NumN)
LSbit : natural := 6 ------- Least significant bit of the outputs
LNum : natural := 0 ------- layer number (needed for initialization)
 
);
 
port
210,8 → 211,6
process (clk)
begin
if (clk'event and clk = '1') then
--report "addr: " & integer'image(wra_l-1);
--report "addr: " & integer'image(to_integer(uaddr(wra_l-1 downto 0)) );
if (m_en = '1') then
if (b_sel = '1') then
rdata <= breg(to_integer(uaddr(bra_l-1 downto 0))); -- Bias registers selected
/support_pkg.vhd
19,7 → 19,7
constant LSbit : int_vector(Nlayer-1 downto 0) := assign_ints("8 8 8",Nlayer);
constant NbitO : int_vector(Nlayer-1 downto 0) := assign_ints("12 12 12",Nlayer);
constant l_type : string := "SP PS SP"; -- Layer type of each layer
constant f_type : string := "siglu2 siglu2 siglu2"; -- Activation function type of each layer
constant f_type : string := "sigmat sigmat sigmat"; -- Activation function type of each layer
 
function real2stdlv (bitW : natural; din : real) return std_logic_vector;
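Since f_type is a flat, space-separated string, each layer's six-character tag ("siglu2", "sigmat", ...) has to be sliced out by position somewhere. A hypothetical helper sketch, not part of support_pkg (the name get_f_type and the fixed six-character token width are assumptions based on the strings above):

-- Hypothetical helper, NOT from the repository: returns the i-th
-- (0-based) activation tag from a string such as "sigmat sigmat sigmat",
-- assuming every token is exactly 6 characters wide plus one separator.
function get_f_type (s : string; i : natural) return string is
begin
  return s(s'left + i*7 to s'left + i*7 + 5);
end get_f_type;

For example, get_f_type(f_type, 1) would return "sigmat" for the second layer.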
 
