OpenCores
URL https://opencores.org/ocsvn/artificial_neural_network/artificial_neural_network/trunk

Subversion Repositories artificial_neural_network

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /artificial_neural_network
    from Rev 10 to Rev 9
    Reverse comparison

Rev 10 → Rev 9

/trunk/ANN_kernel/RTL_VHDL_files/activation_function.vhd
76,26 → 76,7
siglut_inst: entity work.af_sigmoid2
generic map
(
Nbit => Nbit
)
port map
(
reset => reset,
clk => clk,
run_in => run_in,
inputs => inputs,
run_out => run_out,
outputs => outputs
);
end generate;
 
-- Example 3: sigmoid activation function implemented as a LUT, with a second different set of parameters:
sigmoid_mat:
if (f_type = "sigmat") generate
siglut_inst: entity work.af_sigmoid_mat
generic map
(
Nbit => Nbit,
Nbit => Nbit,
lsbit => lsbit
)
port map
/trunk/ANN_kernel/RTL_VHDL_files/af_sigmoid.vhd
46,7 → 46,7
-- Definition of internal modules, constants, signals, etc...
 
-- Sigmoid parameters:
constant f0 : real := 1.0; -- Slope at the origin
constant f0 : real := 2.0; -- Slope at the origin
constant fr : real := 2.0; -- fr = fmax - fmin
 
signal dataIn: integer range (2**Nbit-1) downto 0; -- To convert std_logic_vector input to integer index for the LUT
/trunk/ANN_kernel/RTL_VHDL_files/layerPS_top.vhd
22,7 → 22,7
use IEEE.STD_LOGIC_1164.ALL;
use ieee.numeric_std.all;
 
library work;
use work.wb_init.all; -- initialization package, comment out when not used
-- Deprecated XPS library:
33,17 → 33,17
 
generic
(
NumN : natural := 64; ------- Number of neurons of the layer
NumIn : natural := 8; ------- Number of inputs of each neuron
NbitIn : natural := 12; ------- Bit width of the input data
NbitW : natural := 8; ------- Bit width of weights and biases
WBinit : boolean := false;
LNum : natural := 0; ------- layer number (needed for initialization)
NumN : natural := 34; ------- Number of neurons of the layer
NumIn : natural := 27; ------- Number of inputs of each neuron
NbitIn : natural := 8; ------- Bit width of the input data
NbitW : natural := 1; ------- Bit width of weights and biases
NbitOut : natural := 8; ------- Bit width of the output data
lra_l : natural := 10; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn)
wra_l : natural := 3; ------- Weight RAM address length. It should value log2(NumIn)
lra_l : natural := 11; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn)
wra_l : natural := 5; ------- Weight RAM address length. It should value log2(NumIn)
bra_l : natural := 6; ------- Bias RAM address length. It should value log2(NumN)
LSbit : natural := 4; ------- Less significant bit of the outputs
WBinit : boolean := false;
LNum : natural := 0 ------- layer number (needed for initialization)
LSbit : natural := 6 ------- Less significant bit of the outputs
);
 
port
69,8 → 69,6
 
architecture Behavioral of layerPS_top is
 
--type ramd_type is array (pad_power2(NumN)-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces -- pad_power2() only for simulation
--type layer_ram is array (pad_power2(NumIn)-1 downto 0) of ramd_type;
type ramd_type is array (NumN-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces
type layer_ram is array (NumIn-1 downto 0) of ramd_type;
type outm_type is array (NumIn-1 downto 0) of std_logic_vector(NbitW-1 downto 0);
/trunk/ANN_kernel/RTL_VHDL_files/af_sigmoid2.vhd
28,7 → 28,8
entity af_sigmoid2 is
generic
(
Nbit : natural := 8
Nbit : natural := 8;
lsbit : natural := 10
);
port
(
47,7 → 48,7
-- Definition of internal modules, constants, signals, etc...
 
-- Sigmoid parameters:
constant f0 : real := 0.5; -- Slope at the origin
constant f0 : real := 1.0; -- Slope at the origin
constant fr : real := 2.0; -- fr = fmax - fmin
 
signal dataIn: integer range (2**Nbit-1) downto 0; -- To convert std_logic_vector input to integer index for the LUT
54,8 → 55,8
type table_t is array(0 to (2**Nbit)-1) of std_logic_vector(Nbit-1 downto 0); -- LUT type
 
-- Function Sigmoidal: generates the Look-Up-Table for the sigmoid activation function:
-- margin: maximun value of x.
function Sigmoidal(margin:real;Nbit:natural) return table_t is
-- margin: maximum value of input
function Sigmoidal(margin:real;Nbit:natural;lsbit:natural) return table_t is
variable scale,x,y,w,t: real;
variable u: integer;
variable fbits: std_logic_vector(Nbit-1 downto 0);
62,10 → 63,10
variable table: table_t;
begin
scale := (2.0*margin)/(2.0**Nbit); -- Calculates gap between two points
x := -margin;
x := -margin;
for idx in -(2**(Nbit-1)) to (2**(Nbit-1))-1 loop
y := ( fr / (1.0+exp(((-4.0*f0)/fr)*x)) ) - (fr/2.0);
w := y*(2.0**(Nbit-1)); -- Shifts bits to the left
w := y*(2.0**(lsbit)); -- Shifts bits to the left
t := round(w);
u := integer(t);
fbits := std_logic_vector(to_signed(u,Nbit));
74,12 → 75,12
end loop;
return table;
end Sigmoidal;
signal Table: table_t := Sigmoidal(1.0,Nbit); -- Generation of the LUT (at synthesis time)
signal Table: table_t := Sigmoidal(2.0**(Nbit-lsbit-1),Nbit,lsbit); -- Generation of the LUT (at synthesis time)
 
begin
 
-- Description of the activation function
dataIn <= to_integer(signed(inputs));
dataIn <= to_integer(unsigned(inputs));
 
Activacion: process(clk,reset)
begin
/trunk/ANN_kernel/RTL_VHDL_files/layerSP_top.vhd
33,18 → 33,17
 
generic
(
NumN : natural := 8; ------- Number of neurons of the layer
NumIn : natural := 64; ------- Number of inputs of each neuron
WBinit : boolean := false;
LNum : natural := 0; ------- layer number (needed for initialization)
NumN : natural := 34; ------- Number of neurons of the layer
NumIn : natural := 27; ------- Number of inputs of each neuron
NbitIn : natural := 8; ------- Bit width of the input data
NbitW : natural := 8; ------- Bit width of weights and biases
NbitOut : natural := 12; ------- Bit width of the output data
lra_l : natural := 10; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn)
wra_l : natural := 6; ------- Weight RAM address length. It should value log2(NumIn)
bra_l : natural := 3; ------- Bias RAM address length. It should value log2(NumN)
LSbit : natural := 4; ------- Less significant bit of the outputs
WBinit : boolean := false;
LNum : natural := 0 ------- layer number (needed for initialization)
 
NbitW : natural := 32; ------- Bit width of weights and biases
NbitOut : natural := 8; ------- Bit width of the output data
lra_l : natural := 11; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn)
wra_l : natural := 5; ------- Weight RAM address length. It should value log2(NumIn)
bra_l : natural := 6; ------- Bias RAM address length. It should value log2(NumN)
LSbit : natural := 6 ------- Less significant bit of the outputs
);
 
port
211,6 → 210,8
process (clk)
begin
if (clk'event and clk = '1') then
--report "addr: " & integer'image(wra_l-1);
--report "addr: " & integer'image(to_integer(uaddr(wra_l-1 downto 0)) );
if (m_en = '1') then
if (b_sel = '1') then
rdata <= breg(to_integer(uaddr(bra_l-1 downto 0))); -- Bias registers selected
/trunk/ANN_kernel/RTL_VHDL_files/support_pkg.vhd
19,7 → 19,7
constant LSbit : int_vector(Nlayer-1 downto 0) := assign_ints("8 8 8",Nlayer);
constant NbitO : int_vector(Nlayer-1 downto 0) := assign_ints("12 12 12",Nlayer);
constant l_type : string := "SP PS SP"; -- Layer type of each layer
constant f_type : string := "sigmat sigmat sigmat"; -- Activation function type of each layer
constant f_type : string := "siglu2 siglu2 siglu2"; -- Activation function type of each layer
 
function real2stdlv (bitW : natural; din : real) return std_logic_vector;
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.