OpenCores
URL https://opencores.org/ocsvn/artificial_neural_network/artificial_neural_network/trunk

Subversion Repositories artificial_neural_network

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /artificial_neural_network/trunk
    from Rev 8 to Rev 7

Rev 8 → Rev 7

/test_bench/run.sh: File deleted
\ No newline at end of file

Property changes on: test_bench/run.sh
___________________________________________________________________
Deleted: svn:executable
## -1 +0,0 ##
-*
\ No newline at end of property

Index: test_bench/makefile
===================================================================
--- test_bench/makefile (revision 8)
+++ test_bench/makefile (nonexistent)
@@ -1,40 +0,0 @@
-SRC_KER_DIR = ../ANN_kernel/RTL_VHDL_files
-SRC_TB_DIR = ./src
-VHDLS = \
-	${SRC_KER_DIR}/layers_pkg.vhd \
-	${SRC_KER_DIR}/support_pkg.vhd \
-	${SRC_KER_DIR}/wb_init.vhd \
-	${SRC_KER_DIR}/mac.vhd \
-	${SRC_KER_DIR}/af_sigmoid2.vhd \
-	${SRC_KER_DIR}/activation_function.vhd \
-	${SRC_KER_DIR}/shiftreg_pl.vhd \
-	${SRC_KER_DIR}/shiftreg_pu.vhd \
-	${SRC_KER_DIR}/adder_tree.vhd \
-	${SRC_KER_DIR}/layerPS.vhd \
-	${SRC_KER_DIR}/layerPS_top.vhd \
-	${SRC_KER_DIR}/layerSP.vhd \
-	${SRC_KER_DIR}/layerSP_top.vhd \
-	${SRC_KER_DIR}/ann.vhd \
-	${SRC_TB_DIR}/ann_tb.vhd
-
-# STD=standard
-STD=synopsys
-VSTD=93c
-# VSTD=08
-ENTITY=ann_tb
-#RUN_OPTIONS= --stop-time=1000ns --wave=${ENTITY}.ghw
-RUN_OPTIONS= --wave=${ENTITY}.ghw
-
-all: ${ENTITY}.ghw
-no_ghw : ${ENTITY}
-	./${ENTITY}
-reader: ${ENTITY} ${ENTITY}.ghw
-	gtkwave ${ENTITY}.ghw ${ENTITY}.sav
-${ENTITY}: ${VHDLS}
-	ghdl -a -g --mb-comments --workdir=comp --std=${VSTD} ${VHDLS}
-	ghdl -e -g --mb-comments --workdir=comp --std=${VSTD} -fexplicit --ieee=${STD} ${ENTITY}
-${ENTITY}.ghw: ${ENTITY}
-	./${ENTITY} ${RUN_OPTIONS}
-clean:
-	rm -f comp/* *.o *.vcd *.ghw events* ${ENTITY}
-
Index: test_bench/octave/wb_gen.m
===================================================================
--- test_bench/octave/wb_gen.m (revision 8)
+++ test_bench/octave/wb_gen.m (nonexistent)
@@ -1,117 +0,0 @@
-function wb_gen(NET)
-
-[wbMat NLayers] = getwbc(NET);
-MaxNeuronCnt = max_dim(wbMat);
-
-fileid = fopen ('wb_init.vhd','w');
-
-% addr weights:
-% |_ _| |0| |_ _ _ _ _| |_ _ _ _ _ _|
-%  layer bias  neuron     input
-%  *2^12+ 0  + *2^5     + *2^0
-% addr biases:
-% |_ _| |1| |0 0 0 0 0 0| |_ _ _ _ _|
-%  layer bias              neuron
-%  *2^12+ 2^11 + 0       + *2^0
-
-fprintf(fileid,'library ieee;\n');
-fprintf(fileid,'use ieee.std_logic_1164.all;\n');
-fprintf(fileid,'use ieee.numeric_std.all;\n');
-fprintf(fileid,'library work;\n');
-fprintf(fileid,'use work.support_pkg.all;\n');
-fprintf(fileid,'use work.layers_pkg.all;\n');
-fprintf(fileid,'package wb_init is\n');
-
-fprintf(fileid,'  type ramd_type is array (%i downto 0) of std_logic_vector(NbitW-1 downto 0);\n',MaxNeuronCnt);
-fprintf(fileid,'  type layer_ram is array (%i downto 0) of ramd_type;\n',MaxNeuronCnt);
-fprintf(fileid,'  type w_ram is array (integer range <>) of layer_ram;\n');
-fprintf(fileid,'  type b_type is array (integer range <>) of ramd_type;\n');
-
-fprintf(fileid,'  constant w_init : w_ram :=\n');
-fprintf(fileid,'  (\n');
-for(k=1:NLayers)
-  fprintf(fileid,'    %i => (\n',k-1);
-  for(i=1:size(cell2mat(wbMat(k,1)),2)) % neurons
-    fprintf(fileid,'      %i => (\n',i-1);
-    for(j=1:size(cell2mat(wbMat(k,1)),1)) % inputs
-      fprintf(fileid,'        %i => real2stdlv(NbitW,%1.4f)',j-1, cell2mat(wbMat(k,1))(j,i));
-      if j != size(cell2mat(wbMat(k,1)),1)
-        fprintf(fileid,',\n');
-      else
-        fprintf(fileid,',\n        others =>(others => ''0'')\n');
-      end;
-    end;
-    if i != size(cell2mat(wbMat(k,1)),2)
-      fprintf(fileid,'      ),\n');
-    else
-      fprintf(fileid,'      ),\n      others=>(others =>(others => ''0''))\n');
-    end;
-  end;
-  if k != NLayers
-    fprintf(fileid,'    ),\n');
-  else
-    fprintf(fileid,'    )\n');
-  end;
-end;
-fprintf(fileid,'  );\n\n');
-
-fprintf(fileid,'  constant b_init : b_type :=\n');
-fprintf(fileid,'  (\n');
-for(k=1:NLayers)
-  fprintf(fileid,'    %i => (\n',k-1);
-  for(j=1:length(cell2mat(wbMat(k,2)))) % inputs
-    fprintf(fileid,'      %i => real2stdlv(NbitW,(2.0**LSB_OUT)*(%1.4f))',j-1, cell2mat(wbMat(k,2))(j));
-    if j != length(cell2mat(wbMat(k,2)))
-      fprintf(fileid,',\n');
-    else
-      fprintf(fileid,',\n      others =>(others => ''0'')\n');
-    end;
-  end;
-  if k != NLayers
-    fprintf(fileid,'    ),\n');
-  else
-    fprintf(fileid,'    )\n');
-  end;
-end;
-fprintf(fileid,'  );\n');
-
-
-% fprintf(fileid,'  constant b0_init : ramd_type0 :=\n');
-% fprintf(fileid,'  (\n');
-% for(i=1:length(nn_data.b1))
-%   fprintf(fileid,'    %i => real2stdlv(NbitW,(2.0**LSB_OUT)*(%.4f))',i-1,nn_data.b1(i));
-%   if i != length(nn_data.b1)
-%     fprintf(fileid,',\n');
-%   else
-%     fprintf(fileid,'\n');
-%   end;
-
-% end;
-% fprintf(fileid,'  );\n');
-
-% fprintf(fileid,'  constant b1_init : ramd_type1 :=\n');
-% fprintf(fileid,'  (\n');
-% for(i=1:length(nn_data.b2))
-%   fprintf(fileid,'    %i => real2stdlv(NbitW,(2.0**LSB_OUT)*(%.4f))',i-1,nn_data.b2(i));
-%   if i != length(nn_data.b2)
-%     fprintf(fileid,',\n');
-%   else
-%     fprintf(fileid,'\n');
-%   end;
-% end;
-% fprintf(fileid,'  );\n');
-
-% fprintf(fileid,'  constant b2_init : ramd_type2 :=\n');
-% fprintf(fileid,'  (\n');
-% for(i=1:length(nn_data.b3))
-%   fprintf(fileid,'    %i => real2stdlv(NbitW,(2.0**LSB_OUT)*(%.4f))',i-1,nn_data.b3(i));
-%   if i != length(nn_data.b3)
-%     fprintf(fileid,',\n');
-%   else
-%     fprintf(fileid,'\n');
-%   end;
-% end;
-% fprintf(fileid,'  );\n');
-
-fprintf(fileid,'end wb_init;\n');
-fclose(fileid);
\ No newline at end of file
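The comment block in wb_gen.m above packs the layer index, a bias flag, the neuron index and the input index into a single memory address. A small Octave illustration of that encoding (the helper name is invented for the example; field positions are taken from the comment):

    % Illustrative helper (not part of the repository): pack a weight or
    % bias address as sketched in the wb_gen.m comment above.
    function addr = wb_addr(layer, neuron, input_idx, is_bias)
      if is_bias
        addr = layer*2^12 + 2^11 + neuron;           % |layer|1|000000|neuron|
      else
        addr = layer*2^12 + neuron*2^5 + input_idx;  % |layer|0|neuron |input|
      end
    end
    % wb_addr(1,2,3,false) -> 4163 (a weight), wb_addr(1,2,0,true) -> 6146 (a bias)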
Index: test_bench/octave/nn_ex.m
===================================================================
--- test_bench/octave/nn_ex.m (revision 8)
+++ test_bench/octave/nn_ex.m (nonexistent)
@@ -1,31 +0,0 @@
-x_tr = 2*rand(1,10000)-1;
-y_tr = sin(pi*x_tr);
-
-
-PR = zeros(1,2);
-PR(1,1) = min(x_tr);
-PR(1,2) = max(x_tr);
-
-SS = [2 3 1];
-
-NET = newff (PR,SS,{"tansig" "tansig" "tansig"},"trainlm","learngdm","mse");
-
-NET.trainParam.min_grad = 0;
-NET.trainParam.epochs= 150;
-NET = train(NET,x_tr,y_tr);
-x_val = linspace(-1,1,100);
-x_val = [x_val x_val x_val x_val x_val];
-y_val = sim(NET, x_val);
-
-
-plot(y_val,'.');
-
-wb_gen(NET);
-% system('mv wb_init.vhd ../src/wb_init.vhd')
-
-fid = fopen('../data_in.txt','w');
-fprintf(fid,'%f\n',x_val);
-fclose(fid);
-fid = fopen('../data_out_oct.txt','w');
-fprintf(fid,'%f\n',y_val);
-fclose(fid);
Index: test_bench/octave/getwbc.m
===================================================================
--- test_bench/octave/getwbc.m (revision 8)
+++ test_bench/octave/getwbc.m (nonexistent)
@@ -1,15 +0,0 @@
-function [wb N_layers] = getwbc(NET)
-% save neural network's weigths
-% and biases in a cell array
-
-N_layers = NET.numLayers;
-
-wb = cell(N_layers,2);
-
-wb(1,1) = cell2mat(NET.IW(1,1));
-wb(1,2) = cell2mat(NET.b(1));
-
-for(i=2:N_layers)
-  wb(i,1) = cell2mat(NET.LW(i,i-1));
-  wb(i,2) = cell2mat(NET.b(i));
-end;
\ No newline at end of file
Index: test_bench/octave/max_dim.m
===================================================================
--- test_bench/octave/max_dim.m (revision 8)
+++ test_bench/octave/max_dim.m (nonexistent)
@@ -1,6 +0,0 @@
-function N = max_dim(CA)
-% largest neuron count in layer
-N = 0;
-for (i=1:size(CA,1))
-  N = max(N, max(size(cell2mat(CA(i,1)))));
-end;
Index: test_bench/data_out_oct.txt
===================================================================
--- test_bench/data_out_oct.txt (revision 8)
+++ test_bench/data_out_oct.txt (nonexistent)
@@ -1,500 +0,0 @@
-[500 deleted lines of Octave reference outputs: five repetitions of a 100-point sine sweep, values in the range -0.955417 .. 0.963823]
Index: test_bench/data_out_tb.txt
===================================================================
--- test_bench/data_out_tb.txt (revision 8)
+++ test_bench/data_out_tb.txt (nonexistent)
@@ -1,495 +0,0 @@
-[495 deleted lines of testbench outputs: the same sweep as signed integers scaled by 2^8, values in the range -245 .. 247]
Index: test_bench/show_res.m
===================================================================
--- test_bench/show_res.m (revision 8)
+++ test_bench/show_res.m (nonexistent)
@@ -1,11 +0,0 @@
-y_tb = load('data_out_tb.txt');
-plot(y_tb./256,'b');
-hold on;
-y_oct = load('data_out_oct.txt');
-plot(y_oct,'r');
-title('results from octave vs results from ghdl test bench')
-legend({"results from ghdl","results from octave"})
-
-disp('press any key to continue');
-
-pause;
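show_res.m above only overlays the two traces; a numeric error check is a useful complement when comparing the two deleted data files by hand. A minimal Octave sketch (assumes the two file names above and LSB_OUT = 8, hence the 256 divisor):

    % Quantify the fixed-point error of the GHDL testbench against Octave.
    y_tb  = load('data_out_tb.txt') ./ 256;   % scale by 2^LSB_OUT
    y_oct = load('data_out_oct.txt');
    n   = min(length(y_tb), length(y_oct));   % the files differ in length (495 vs 500)
    err = y_tb(1:n) - y_oct(1:n);
    printf('max abs error: %f (%.1f LSBs)\n', max(abs(err)), 256*max(abs(err)));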
Index: ANN_kernel/RTL_VHDL_files/wb_init.vhd
===================================================================
--- ANN_kernel/RTL_VHDL_files/wb_init.vhd (revision 8)
+++ ANN_kernel/RTL_VHDL_files/wb_init.vhd (nonexistent)
@@ -1,72 +0,0 @@
-library ieee;
-use ieee.std_logic_1164.all;
-use ieee.numeric_std.all;
-library work;
-use work.support_pkg.all;
-use work.layers_pkg.all;
-package wb_init is
-  type ramd_type is array (3 downto 0) of std_logic_vector(NbitW-1 downto 0);
-  type layer_ram is array (3 downto 0) of ramd_type;
-  type w_ram is array (integer range <>) of layer_ram;
-  type b_type is array (integer range <>) of ramd_type;
-  constant w_init : w_ram :=
-  (
-    0 => (
-      0 => (
-        0 => real2stdlv(NbitW,-0.8964),
-        1 => real2stdlv(NbitW,-2.6600),
-        others =>(others => '0')
-      ),
-      others=>(others =>(others => '0'))
-    ),
-    1 => (
-      0 => (
-        0 => real2stdlv(NbitW,-5.6056),
-        1 => real2stdlv(NbitW,-1.5274),
-        2 => real2stdlv(NbitW,-8.4909),
-        others =>(others => '0')
-      ),
-      1 => (
-        0 => real2stdlv(NbitW,1.0885),
-        1 => real2stdlv(NbitW,0.7244),
-        2 => real2stdlv(NbitW,3.8977),
-        others =>(others => '0')
-      ),
-      others=>(others =>(others => '0'))
-    ),
-    2 => (
-      0 => (
-        0 => real2stdlv(NbitW,6.0449),
-        others =>(others => '0')
-      ),
-      1 => (
-        0 => real2stdlv(NbitW,-2.8724),
-        others =>(others => '0')
-      ),
-      2 => (
-        0 => real2stdlv(NbitW,-5.0188),
-        others =>(others => '0')
-      ),
-      others=>(others =>(others => '0'))
-    )
-  );
-
-  constant b_init : b_type :=
-  (
-    0 => (
-      0 => real2stdlv(NbitW,(2.0**LSB_OUT)*(0.3704)),
-      1 => real2stdlv(NbitW,(2.0**LSB_OUT)*(0.7149)),
-      others =>(others => '0')
-    ),
-    1 => (
-      0 => real2stdlv(NbitW,(2.0**LSB_OUT)*(2.8121)),
-      1 => real2stdlv(NbitW,(2.0**LSB_OUT)*(0.3690)),
-      2 => real2stdlv(NbitW,(2.0**LSB_OUT)*(2.4685)),
-      others =>(others => '0')
-    ),
-    2 => (
-      0 => real2stdlv(NbitW,(2.0**LSB_OUT)*(0.0784)),
-      others =>(others => '0')
-    )
-  );
-end wb_init;
Index: ANN_kernel/RTL_VHDL_files/support_pkg.vhd
===================================================================
--- ANN_kernel/RTL_VHDL_files/support_pkg.vhd (revision 8)
+++ ANN_kernel/RTL_VHDL_files/support_pkg.vhd (nonexistent)
@@ -1,37 +0,0 @@
-library IEEE;
-use IEEE.STD_LOGIC_1164.all;
-use IEEE.numeric_std.all;
-use work.layers_pkg.all;
-package support_pkg is
-
-  -- generic constants:
-
-  constant NbitIn  : natural := 12;
-  constant LSB_In  : natural := 8;
-  constant Nbit    : natural := 12;
-  constant NbitW   : natural := 24;
-  constant LSB_OUT : natural := 8;
-  constant Nlayer  : natural := 3;
-
-  constant NbitOut : integer := 12 ;
-  constant NumIn   : integer := 1;
-  constant NumN    : int_vector(Nlayer-1 downto 0) := assign_ints("2 3 1",Nlayer);
-  constant LSbit   : int_vector(Nlayer-1 downto 0) := assign_ints("8 8 8",Nlayer);
-  constant NbitO   : int_vector(Nlayer-1 downto 0) := assign_ints("12 12 12",Nlayer);
-  constant l_type  : string := "SP PS SP"; -- Layer type of each layer
-  constant f_type  : string := "siglu2 siglu2 siglu2"; -- Activation function type of each layer
-
-  function real2stdlv (bitW : natural; din : real) return std_logic_vector;
-
-end support_pkg;
-
-package body support_pkg is
-
-function real2stdlv (bitW : natural; din : real) return std_logic_vector is
-  variable vres : signed(bitW-1 downto 0) := (others => '0');
-  begin  -- real2stdlv
-    vres:= to_signed(integer(din*(2.0**(LSB_OUT))), bitW);
-    return std_logic_vector(vres);
-  end real2stdlv;
-
-end support_pkg;
\ No newline at end of file
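real2stdlv in support_pkg.vhd quantizes a real weight to a signed word with LSB_OUT fractional bits. The same conversion in Octave, handy for spot-checking values in the generated wb_init.vhd (illustrative only; VHDL's integer() conversion rounds to the nearest integer):

    % Octave equivalent of real2stdlv, with LSB_OUT fractional bits:
    real2fix = @(din, lsb) round(din * 2^lsb);
    real2fix(-0.8964, 8)   % -> -229, i.e. x"FFFF1B" as a 24-bit signed word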
Index: ANN_kernel/RTL_VHDL_files/layers_pkg.vhd
===================================================================
--- ANN_kernel/RTL_VHDL_files/layers_pkg.vhd (revision 8)
+++ ANN_kernel/RTL_VHDL_files/layers_pkg.vhd (revision 7)
@@ -1,304 +1,301 @@
-----------------------------------------------------------------------------------
--- Company: CEI - UPM
--- Engineer: David Aledo
---
--- Create Date: 01.10.2015
--- Design Name: Configurable ANN
--- Pakage Name: layers_pkg
--- Project Name:
--- Target Devices:
--- Tool Versions:
--- Description: define array types for generics, functions to give them values from
--- string generics, and other help functions
--- Dependencies:
---
--- Revision:
--- Revision 0.01 - File Created
--- Additional Comments:
---
-----------------------------------------------------------------------------------
-
-library IEEE;
-use IEEE.STD_LOGIC_1164.all;
-use IEEE.numeric_std.all;
-
---library proc_common_v3_00_a; -- Deprecated libray from XPS tool
---use proc_common_v3_00_a.proc_common_pkg.all;
-
-package layers_pkg is
-
-  -- Array types for generics:
-  type int_vector is array (natural range <>) of integer; -- Generic integer vector
-  type ltype_vector is array (integer range <>) of string(1 to 2); -- Layer type vector
-  type ftype_vector is array (integer range <>) of string(1 to 6); -- Activation function type vector
-  -- Note: these strings cannot be unconstrined
-
-  -- Functions to assign values to vector types from string generics:
-  -- Arguments:
-  --   str_v : string to be converted
-  --   n : number of elements of the vector
-  -- Return: assigned vector
-  function assign_ints(str_v : string; n : integer) return int_vector;
-  function assign_ltype(str_v : string; n : integer) return ltype_vector;
-  function assign_ftype(str_v : string; n : integer) return ftype_vector;
-
-  -- Other functions:
-
-  -- Argument: c : character to be checked
-  -- Return: TRUE if c is 0, 1, 2, 3, 4, 5, 6, 7, 8 or 9
-  function is_digit(c : character) return boolean;
-
-
-  -- Base two logarithm for int_vector:
-  -- Arguments:
-  --   v : integer vector
-  --   n : number of elements of the vector
-  -- Return : integer vector of the base two logarithms of each elment of v
-  function log2(v : int_vector; n : integer) return int_vector;
-
-  -- Calculate the total weight and bias memory address length:
-  -- Arguments:
-  --   NumIn : number of inputs of the network
-  --   NumN : number of neurons of each layer
-  --   n : number of layers (number of elements of NumN)
-  -- Return: total weight and bias memory address length (integer)
-  function calculate_addr_l(NumIn : integer; NumN : int_vector; n : integer) return integer;
-
-  -- Assign the weight and bias memory address lenght of each layer:
-  -- Arguments:
-  --   NumIn : number of inputs of the network
-  --   NumN : number of neurons of each layer
-  --   n : number of layers (number of elements of NumN and the return integer vector)
-  -- Return: weight and bias memory address lenght of each layer (integer vector)
-  function assign_addrl(NumIn : integer; NumN : int_vector; n : integer) return int_vector;
-
-  -- Calculate the maximum of the multiplications of two vectors element by element
-  -- Arguments:
-  --   v1 : input vector 1
-  --   v2 : input vector 2
-  -- Return: maximum of the multiplications of two vectors element by element
-  function calculate_max_mul(v1 : int_vector; v2 : int_vector) return integer;
-
-  -- Returns the max value of the input integer vector:
-  function calculate_max(v : int_vector) return integer;
-
-  -- Adding needed functions from the deprecated libray proc_common_v3_00_a:
-  function max2 (num1, num2 : integer) return integer;
-  function log2(x : natural) return integer;
-
-end layers_pkg;
-
-package body layers_pkg is
-
-  function max2 (num1, num2 : integer) return integer is
-  begin
-    if num1 >= num2 then
-      return num1;
-    else
-      return num2;
-    end if;
-  end function max2;
-
--- Function log2 -- returns number of bits needed to encode x choices
--- x = 0 returns 0
--- x = 1 returns 0
--- x = 2 returns 1
--- x = 4 returns 2, etc.
-  function log2(x : natural) return integer is
-    variable i : integer := 0;
-    variable val: integer := 1;
-  begin
-    if x = 0 then
-      return 0;
-    else
-      for j in 0 to 29 loop -- for loop for XST
-        if val >= x then null;
-        else
-          i := i+1;
-          val := val*2;
-        end if;
-      end loop;
-      -- Fix per CR520627 XST was ignoring this anyway and printing a
-      -- Warning in SRP file. This will get rid of the warning and not
-      -- impact simulation.
-      -- synthesis translate_off
-      assert val >= x
-        report "Function log2 received argument larger" &
-               " than its capability of 2^30. "
-        severity failure;
-      -- synthesis translate_on
-      return i;
-    end if;
-  end function log2;
-
-
-  function is_digit(c : character) return boolean is
-  begin
-    case c is
-      when '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' => return true;
-      when others => return false;
-    end case;
-  end is_digit;
-
-
-  -- Assign values to a integer vector from a string:
-  -- Arguments:
-  --   str_v : string to be converted
-  --   n : number of elements of the vector
-  -- Return: assigned integer vector
-  function assign_ints(str_v : string; n : integer) return int_vector is
-    variable i : integer := n-1; ---- element counter
-    variable d_power : integer := 1; -- decimal power
-    variable ret : int_vector(n-1 downto 0) := (others => 0); -- return value
-  begin
-    for c in str_v'length downto 1 loop -- read every character in str_v
-      if str_v(c) = ' ' then -- a space separates a new element
-        assert i > 0
-          report "Error in assign_ints: number of elements in string is greater than n."
-          severity error;
-        i := i -1; -- decrease element counter to start calculate a new element
-        d_power := 1; -- reset the decimal power to 1
-      else
-        assert is_digit(str_v(c)) -- assert the new character is a digit
-          report "Error in assign_ints: character " & str_v(c) & " is not a digit."
-          severity error;
-        -- add the value of the new charactar to the element calculation ( + ("" - "0") * d_power):
-        ret(i) := ret(i) + (character'pos(str_v(c))-character'pos('0'))*d_power;
-        d_power := d_power*10; -- increase the decimal power for the next digit
-      end if;
-    end loop;
-    assert i = 0
-      report "Error in assign_ints: number of elements in string is less than n."
-      severity error;
-    return ret;
-  end assign_ints;
-
-  -- Assign values to an activation function type vector from a string:
-  -- Arguments:
-  --   str_v : string to be converted
-  --   n : number of elements of the vector
-  -- Return: assigned activation function type vector
-  function assign_ftype(str_v : string; n : integer) return ftype_vector is
-    variable i : integer := 0; -- element counter
-    variable l : integer := 1; -- element length counter
-    variable ret : ftype_vector(n-1 downto 0) := (others => "linear"); -- return value
-  begin
-    for c in 1 to str_v'length loop -- read every character in str_v
-      if str_v(c) = ' ' then -- a space separates a new element
-        i := i +1; -- increase element counter to start calculate a new element
-        l := 1; -- reset element length counter
-      else
-        ret(i)(l) := str_v(c);
-        l := l +1; -- increase element length counter
-      end if;
-    end loop;
-    assert i = n-1
-      report "Error in assign_ftype: number of elements in string is less than n."
-      severity error;
-    return ret;
-  end assign_ftype;
-
-  -- Assign values to an layer type vector from a string:
-  -- Arguments:
-  --   str_v : string to be converted
-  --   n : number of elements of the vector
-  -- Return: assigned layer type vector
-  function assign_ltype(str_v : string; n : integer) return ltype_vector is
-    variable i : integer := 0; -- element counter
-    variable l : integer := 1; -- element length counter
-    variable ret : ltype_vector(n-1 downto 0) := (others => "SP"); -- return value
-  begin
-    for c in 1 to str_v'length loop
-      if str_v(c) = ' ' then -- a space separates a new element
-        i := i +1; -- increase element counter to start calculate a new element
-        l := 1; -- reset element length counter
-      else
-        assert str_v(c) = 'P' or str_v(c) = 'S'
-          report "Error in assign_ltype: character " & str_v(c) & " is not 'P' (parallel) or 'S' (serial)."
-          severity error;
-        ret(i)(l) := str_v(c);
-        l := l +1; -- increase element length counter
-      end if;
-    end loop;
-    assert i = n-1
-      report "Error in assign_ltype: number of elements do not coincide with number of introduced elements."
-      severity error;
-    return ret;
-  end assign_ltype;
-
-  -- Calculate the total weight and bias memory address length:
-  -- Arguments:
-  --   NumIn : number of inputs of the network
-  --   NumN : number of neurons of each layer
-  --   n : number of layers (number of elements of NumN)
-  -- Return: total weight and bias memory address length (integer)
-  function calculate_addr_l(NumIn : integer; NumN : int_vector; n : integer) return integer is -- matrix + b_sel
-    variable addr_l : integer := log2(NumIn)+log2(NumN(0)); -- return value. Initialized with the weight memory length of the first layer
-  begin
-    -- Calculate the maximum of the weight memory length:
-    for i in 1 to n-1 loop
-      addr_l := max2( addr_l, log2(NumN(i-1)+log2(NumN(i))) );
-    end loop;
-    addr_l := addr_l +1; -- add bias select bit
-    return addr_l;
-  end calculate_addr_l;
-
-  -- Base two logarithm for int_vector:
-  -- Arguments:
-  --   v : integer vector
-  --   n : number of elements of the vector
-  -- Return : integer vector of the base two logarithms of each elment of v
-  function log2(v : int_vector; n : integer) return int_vector is
-    variable ret : int_vector(n-1 downto 0); -- return value
-  begin
-    -- for each element of v, calculate its base two logarithm:
-    for i in 0 to n-1 loop
-      ret(i) := log2(v(i));
-    end loop;
-    return ret;
-  end log2;
-
-  -- Assign the weight and bias memory address lenght of each layer:
-  -- Arguments:
-  --   NumIn : number of inputs of the network
-  --   NumN : number of neurons of each layer
-  --   n : number of layers (number of elements of NumN and the return integer vector)
-  -- Return: weight and bias memory address lenght of each layer (integer vector)
-  function assign_addrl(NumIn : integer; NumN : int_vector; n : integer) return int_vector is
-    variable ret : int_vector(n-1 downto 0); -- return value
-  begin
-    ret(0) := log2(NumIn)+log2(NumN(0)); -- Weight memory length of the first layer
-    for i in 1 to n-1 loop
-      ret(i) := log2(NumN(i-1))+log2(NumN(i));
-    end loop;
-    return ret;
-  end assign_addrl;
-
-  -- Returns the max value of the input integer vector:
-  function calculate_max(v : int_vector) return integer is
-    variable ac_max : integer := 0; -- return value
-  begin
-    for i in 0 to v'length-1 loop
-      ac_max := max2(ac_max,v(i));
-    end loop;
-    return ac_max;
-  end calculate_max;
-
-  -- Calculate the maximum of the multiplications of two vectors element by element
-  -- Arguments:
-  --   v1 : input vector 1
-  --   v2 : input vector 2
-  -- Return: maximum of the multiplications of two vectors element by element
-  function calculate_max_mul(v1 : int_vector; v2 : int_vector) return integer is
-    variable ac_max : integer := 0;
-  begin
-    assert v1'length = v2'length
-      report "Error in calculate_max_mul: vector's length do not coincide."
-      severity error;
-    for i in 0 to v1'length-1 loop
-      ac_max := max2(ac_max,v1(i)*v2(i));
-    end loop;
-    return ac_max;
-  end calculate_max_mul;
-
-end layers_pkg;
+----------------------------------------------------------------------------------
+-- Company: CEI - UPM
+-- Engineer: David Aledo
+--
+-- Create Date: 01.10.2015
+-- Design Name: Configurable ANN
+-- Pakage Name: layers_pkg
+-- Project Name:
+-- Target Devices:
+-- Tool Versions:
+-- Description: define array types for generics, functions to give them values from
+-- string generics, and other help functions
+-- Dependencies:
+--
+-- Revision:
+-- Revision 0.01 - File Created
+-- Additional Comments:
+--
+----------------------------------------------------------------------------------
+
+library IEEE;
+use IEEE.STD_LOGIC_1164.all;
+
+--library proc_common_v3_00_a; -- Deprecated libray from XPS tool
+--use proc_common_v3_00_a.proc_common_pkg.all;
+
+package layers_pkg is
+
+  -- Array types for generics:
+  type int_vector is array (natural range <>) of integer; -- Generic integer vector
+  type ltype_vector is array (integer range <>) of string(1 to 2); -- Layer type vector
+  type ftype_vector is array (integer range <>) of string(1 to 6); -- Activation function type vector
+  -- Note: these strings cannot be unconstrined
+
+  -- Functions to assign values to vector types from string generics:
+  -- Arguments:
+  --   str_v : string to be converted
+  --   n : number of elements of the vector
+  -- Return: assigned vector
+  function assign_ints(str_v : string; n : integer) return int_vector;
+  function assign_ltype(str_v : string; n : integer) return ltype_vector;
+  function assign_ftype(str_v : string; n : integer) return ftype_vector;
+
+  -- Other functions:
+
+  -- Argument: c : character to be checked
+  -- Return: TRUE if c is 0, 1, 2, 3, 4, 5, 6, 7, 8 or 9
+  function is_digit(c : character) return boolean;
+
+  -- Base two logarithm for int_vector:
+  -- Arguments:
+  --   v : integer vector
+  --   n : number of elements of the vector
+  -- Return : integer vector of the base two logarithms of each elment of v
+  function log2(v : int_vector; n : integer) return int_vector;
+
+  -- Calculate the total weight and bias memory address length:
+  -- Arguments:
+  --   NumIn : number of inputs of the network
+  --   NumN : number of neurons of each layer
+  --   n : number of layers (number of elements of NumN)
+  -- Return: total weight and bias memory address length (integer)
+  function calculate_addr_l(NumIn : integer; NumN : int_vector; n : integer) return integer;
+
+  -- Assign the weight and bias memory address lenght of each layer:
+  -- Arguments:
+  --   NumIn : number of inputs of the network
+  --   NumN : number of neurons of each layer
+  --   n : number of layers (number of elements of NumN and the return integer vector)
+  -- Return: weight and bias memory address lenght of each layer (integer vector)
+  function assign_addrl(NumIn : integer; NumN : int_vector; n : integer) return int_vector;
+
+  -- Calculate the maximum of the multiplications of two vectors element by element
+  -- Arguments:
+  --   v1 : input vector 1
+  --   v2 : input vector 2
+  -- Return: maximum of the multiplications of two vectors element by element
+  function calculate_max_mul(v1 : int_vector; v2 : int_vector) return integer;
+
+  -- Returns the max value of the input integer vector:
+  function calculate_max(v : int_vector) return integer;
+
+  -- Adding needed functions from the deprecated libray proc_common_v3_00_a:
+  function max2 (num1, num2 : integer) return integer;
+  function log2(x : natural) return integer;
+
+end layers_pkg;
+
+package body layers_pkg is
+
+  function max2 (num1, num2 : integer) return integer is
+  begin
+    if num1 >= num2 then
+      return num1;
+    else
+      return num2;
+    end if;
+  end function max2;
+
+-- Function log2 -- returns number of bits needed to encode x choices
+-- x = 0 returns 0
+-- x = 1 returns 0
+-- x = 2 returns 1
+-- x = 4 returns 2, etc.
+  function log2(x : natural) return integer is
+    variable i : integer := 0;
+    variable val: integer := 1;
+  begin
+    if x = 0 then
+      return 0;
+    else
+      for j in 0 to 29 loop -- for loop for XST
+        if val >= x then null;
+        else
+          i := i+1;
+          val := val*2;
+        end if;
+      end loop;
+      -- Fix per CR520627 XST was ignoring this anyway and printing a
+      -- Warning in SRP file. This will get rid of the warning and not
+      -- impact simulation.
+      -- synthesis translate_off
+      assert val >= x
+        report "Function log2 received argument larger" &
+               " than its capability of 2^30. "
+        severity failure;
+      -- synthesis translate_on
+      return i;
+    end if;
+  end function log2;
+
+
+  function is_digit(c : character) return boolean is
+  begin
+    case c is
+      when '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' => return true;
+      when others => return false;
+    end case;
+  end is_digit;
+
+  -- Assign values to a integer vector from a string:
+  -- Arguments:
+  --   str_v : string to be converted
+  --   n : number of elements of the vector
+  -- Return: assigned integer vector
+  function assign_ints(str_v : string; n : integer) return int_vector is
+    variable i : integer := n-1; ---- element counter
+    variable d_power : integer := 1; -- decimal power
+    variable ret : int_vector(n-1 downto 0) := (others => 0); -- return value
+  begin
+    for c in str_v'length downto 1 loop -- read every character in str_v
+      if str_v(c) = ' ' then -- a space separates a new element
+        assert i > 0
+          report "Error in assign_ints: number of elements in string is greater than n."
+          severity error;
+        i := i -1; -- decrease element counter to start calculate a new element
+        d_power := 1; -- reset the decimal power to 1
+      else
+        assert is_digit(str_v(c)) -- assert the new character is a digit
+          report "Error in assign_ints: character " & str_v(c) & " is not a digit."
+          severity error;
+        -- add the value of the new charactar to the element calculation ( + ("" - "0") * d_power):
+        ret(i) := ret(i) + (character'pos(str_v(c))-character'pos('0'))*d_power;
+        d_power := d_power*10; -- increase the decimal power for the next digit
+      end if;
+    end loop;
+    assert i = 0
+      report "Error in assign_ints: number of elements in string is less than n."
+      severity error;
+    return ret;
+  end assign_ints;
+
+  -- Assign values to an activation function type vector from a string:
+  -- Arguments:
+  --   str_v : string to be converted
+  --   n : number of elements of the vector
+  -- Return: assigned activation function type vector
+  function assign_ftype(str_v : string; n : integer) return ftype_vector is
+    variable i : integer := 0; -- element counter
+    variable l : integer := 1; -- element length counter
+    variable ret : ftype_vector(n-1 downto 0) := (others => "linear"); -- return value
+  begin
+    for c in 1 to str_v'length loop -- read every character in str_v
+      if str_v(c) = ' ' then -- a space separates a new element
+        i := i +1; -- increase element counter to start calculate a new element
+        l := 1; -- reset element length counter
+      else
+        ret(i)(l) := str_v(c);
+        l := l +1; -- increase element length counter
+      end if;
+    end loop;
+    assert i = n-1
+      report "Error in assign_ftype: number of elements in string is less than n."
+      severity error;
+    return ret;
+  end assign_ftype;
+
+  -- Assign values to an layer type vector from a string:
+  -- Arguments:
+  --   str_v : string to be converted
+  --   n : number of elements of the vector
+  -- Return: assigned layer type vector
+  function assign_ltype(str_v : string; n : integer) return ltype_vector is
+    variable i : integer := 0; -- element counter
+    variable l : integer := 1; -- element length counter
+    variable ret : ltype_vector(n-1 downto 0) := (others => "SP"); -- return value
+  begin
+    for c in 1 to str_v'length loop
+      if str_v(c) = ' ' then -- a space separates a new element
+        i := i +1; -- increase element counter to start calculate a new element
+        l := 1; -- reset element length counter
+      else
+        assert str_v(c) = 'P' or str_v(c) = 'S'
+          report "Error in assign_ltype: character " & str_v(c) & " is not 'P' (parallel) or 'S' (serial)."
+          severity error;
+        ret(i)(l) := str_v(c);
+        l := l +1; -- increase element length counter
+      end if;
+    end loop;
+    assert i = n-1
+      report "Error in assign_ltype: number of elements do not coincide with number of introduced elements."
+      severity error;
+    return ret;
+  end assign_ltype;
+
+  -- Calculate the total weight and bias memory address length:
+  -- Arguments:
+  --   NumIn : number of inputs of the network
+  --   NumN : number of neurons of each layer
+  --   n : number of layers (number of elements of NumN)
+  -- Return: total weight and bias memory address length (integer)
+  function calculate_addr_l(NumIn : integer; NumN : int_vector; n : integer) return integer is -- matrix + b_sel
+    variable addr_l : integer := log2(NumIn)+log2(NumN(0)); -- return value. Initialized with the weight memory length of the first layer
+  begin
+    -- Calculate the maximum of the weight memory length:
+    for i in 1 to n-1 loop
+      addr_l := max2( addr_l, log2(NumN(i-1))+log2(NumN(i)) );
+    end loop;
+    addr_l := addr_l +1; -- add bias select bit
+    return addr_l;
+  end calculate_addr_l;
+
+  -- Base two logarithm for int_vector:
+  -- Arguments:
+  --   v : integer vector
+  --   n : number of elements of the vector
+  -- Return : integer vector of the base two logarithms of each elment of v
+  function log2(v : int_vector; n : integer) return int_vector is
+    variable ret : int_vector(n-1 downto 0); -- return value
+  begin
+    -- for each element of v, calculate its base two logarithm:
+    for i in 0 to n-1 loop
+      ret(i) := log2(v(i));
+    end loop;
+    return ret;
+  end log2;
+
+  -- Assign the weight and bias memory address lenght of each layer:
+  -- Arguments:
+  --   NumIn : number of inputs of the network
+  --   NumN : number of neurons of each layer
+  --   n : number of layers (number of elements of NumN and the return integer vector)
+  -- Return: weight and bias memory address lenght of each layer (integer vector)
+  function assign_addrl(NumIn : integer; NumN : int_vector; n : integer) return int_vector is
+    variable ret : int_vector(n-1 downto 0); -- return value
+  begin
+    ret(0) := log2(NumIn)+log2(NumN(0)); -- Weight memory length of the first layer
+    for i in 1 to n-1 loop
+      ret(i) := log2(NumN(i-1))+log2(NumN(i));
+    end loop;
+    return ret;
+  end assign_addrl;
+
+  -- Returns the max value of the input integer vector:
+  function calculate_max(v : int_vector) return integer is
+    variable ac_max : integer := 0; -- return value
+  begin
+    for i in 0 to v'length-1 loop
+      ac_max := max2(ac_max,v(i));
+    end loop;
+    return ac_max;
+  end calculate_max;
+
+  -- Calculate the maximum of the multiplications of two vectors element by element
+  -- Arguments:
+  --   v1 : input vector 1
+  --   v2 : input vector 2
+  -- Return: maximum of the multiplications of two vectors element by element
+  function calculate_max_mul(v1 : int_vector; v2 : int_vector) return integer is
+    variable ac_max : integer := 0;
+  begin
+    assert v1'length = v2'length
+      report "Error in calculate_max_mul: vector's length do not coincide."
+      severity error;
+    for i in 0 to v1'length-1 loop
+      ac_max := max2(ac_max,v1(i)*v2(i));
+    end loop;
+    return ac_max;
+  end calculate_max_mul;
+
+end layers_pkg;
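Note that the two revisions of calculate_addr_l differ only in the parenthesisation of the log2 terms, visible in the two sides above. A quick Octave cross-check of the intended address sizing for the "2 3 1" example network (illustrative; log2 here means bits needed to encode x choices, as in the package):

    nbits = @(x) ceil(log2(max(x, 1)));     % 1 -> 0, 2 -> 1, 3 -> 2, 4 -> 2 ...
    NumIn = 1; NumN = [2 3 1];
    per_layer = nbits([NumIn NumN(1:end-1)]) + nbits(NumN);  % [1 3 2]
    addr_l = max(per_layer) + 1             % +1 bias-select bit -> 4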
Index: ANN_kernel/RTL_VHDL_files/ann.vhd
===================================================================
--- ANN_kernel/RTL_VHDL_files/ann.vhd (revision 8)
+++ ANN_kernel/RTL_VHDL_files/ann.vhd (revision 7)
@@ -28,7 +28,6 @@
 entity ann is
   generic
   (
-    WBinit : boolean := false;
     Nlayer : integer := 2; ---- Number of layers
    NbitW : natural := 16; ---- Bit width of weights and biases
    NumIn : natural := 64; ---- Number of inputs to the network
@@ -118,8 +117,6 @@
   first_layerSP_top_inst: entity work.layerSP_top
   generic map
   (
-    WBinit => WBinit ,
-    LNum => 0 ,
     NumN => NumN(0), -- Number of neurons in the first layer
     NumIn => NumIn, ---- Number of inputs of the first layer
     NbitIn => NbitIn, --- Bit width of the input data
@@ -181,8 +178,7 @@
   generic map
   (
     f_type => ftype_v(i-1), -- Activation function type of the previous layer (i-1)
-    Nbit => NbitO(i-1), -- Bit width of the outputs of the previous layer (i-1)
-    lsbit => LSbit(i-1) -- least significant bit of activation function
+    Nbit => NbitO(i-1) --- Bit width of the outputs of the previous layer (i-1)
   )
   port map
   (
@@ -206,8 +202,7 @@
   generic map
   (
     f_type => ftype_v(i-1), -- Activation function type of the previous layer (i-1)
-    Nbit => NbitO(i-1), -- Bit width of the outputs of the previous layer (i-1)
-    lsbit => LSbit(i-1) -- least significant bit of activation function
+    Nbit => NbitO(i-1) --- Bit width of the outputs of the previous layer (i-1)
   )
   port map
   (
@@ -231,8 +226,7 @@
   generic map
   (
     f_type => ftype_v(i-1), -- Activation function type of the previous layer (i-1)
-    Nbit => NbitO(i-1), -- Bit width of the outputs of the previous layer (i-1)
-    lsbit => LSbit(i-1) -- least significant bit of activation function
+    Nbit => NbitO(i-1) --- Bit width of the outputs of the previous layer (i-1)
   )
   port map
   (
@@ -251,8 +245,7 @@
   generic map
   (
     f_type => ftype_v(i-1), -- Activation function type of the previous layer (i-1)
-    Nbit => NbitO(i-1) , -- Bit width of the outputs of the previous layer (i-1)
-    lsbit => LSbit(i-1) -- least significant bit of activation function
+    Nbit => NbitO(i-1) --- Bit width of the outputs of the previous layer (i-1)
   )
   port map
   (
@@ -277,8 +270,7 @@
   generic map
   (
     f_type => ftype_v(i-1),
-    Nbit => NbitO(i-1),
-    lsbit => LSbit(i-1) -- least significant bit of activation function
+    Nbit => NbitO(i-1)
   )
   port map
   (
@@ -317,8 +309,6 @@
   layerSP_top_inst: entity work.layerSP_top
   generic map
   (
-    WBinit => WBinit ,
-    LNum => i ,
     NumN => NumN(i), --- Number of neurons in layer (i)
     NumIn => NumN(i-1), -- Number of inputs, is the number of neurons in previous layer (i-1)
     NbitIn => NbitO(i-1), -- Bit width of the input data, is the bit width of output data of layer (i-1)
@@ -354,9 +344,7 @@
   if ltype_v(i) = "PS" generate
     layerPS_top_inst: entity work.layerPS_top
     generic map
-    (
-      WBinit => WBinit ,
-      LNum => i ,
+    (
      NumN => NumN(i), --- Number of neurons in layer (i)
      NumIn => NumN(i-1), -- Number of inputs, is the number of neurons in previous layer (i-1)
      NbitIn => NbitO(i-1), -- Bit width of the input data, is the bit width of output data of layer (i-1)
@@ -392,9 +380,9 @@
   if ltype_v(i) = "PP" generate
     -- TODO: instance a full parallel layer. At current version this layer type has not been developed.
     -- synthesis translate_off
-    --assert l_type(i) /= "PP"
-    --  report "Current version does not accept parallel-input parallel-output (PP) layer type."
-    --  severity failure;
+    assert l_type(i) /= "PP"
+      report "Current version does not accept parallel-input parallel-output (PP) layer type."
+      severity failure;
     -- synthesis translate_on
     -- TODO: delete above lines when instantiate the parallel-input parallel-output layer.
   end generate;
@@ -426,8 +414,7 @@
   generic map
   (
     f_type => ftype_v(Nlayer-1), -- Activation function type of the last layer (Nlayer-1)
-    Nbit => NbitO(Nlayer-1), --- Bit width of the outputs of the last layer (Nlayer-1)
-    lsbit => LSbit(Nlayer-1) -- least significant bit of activation function
+    Nbit => NbitO(Nlayer-1) --- Bit width of the outputs of the last layer (Nlayer-1)
   )
   port map
   (
@@ -448,8 +435,7 @@
   generic map
   (
     f_type => ftype_v(Nlayer-1), -- Activation function type of the last layer (Nlayer-1)
-    Nbit => NbitO(Nlayer-1), -- Bit width of the outputs of the last layer (Nlayer-1)
-    lsbit => LSbit(Nlayer-1) -- least significant bit of activation function
+    Nbit => NbitO(Nlayer-1) --- Bit width of the outputs of the last layer (Nlayer-1)
   )
   port map
   (
Index: ANN_kernel/RTL_VHDL_files/af_sigmoid.vhd
===================================================================
--- ANN_kernel/RTL_VHDL_files/af_sigmoid.vhd (revision 8)
+++ ANN_kernel/RTL_VHDL_files/af_sigmoid.vhd (revision 7)
@@ -1,99 +1,99 @@
-----------------------------------------------------------------------------------
--- Company: CEI
--- Engineer: Enrique Herrero
---
--- Create Date:
--- Design Name: Configurable ANN
--- Module Name: af_sigmoid - Behavioral
--- Project Name:
--- Target Devices:
--- Tool versions:
--- Description: Sigmoid activation function implemented as a Look-Up-Table (LUT).
---
--- Dependencies:
---
--- Revision:
--- Revision 0.01 - File Created
--- Revision 1 - David Aledo
--- Additional Comments:
---
-----------------------------------------------------------------------------------
-library IEEE;
-use IEEE.STD_LOGIC_1164.ALL;
-use ieee.numeric_std.ALL;
-use ieee.math_real.all;
-
-
-entity af_sigmoid is
-  generic
-  (
-    Nbit : natural := 8
-  );
-  port
-  (
-    reset : in std_logic;
-    clk : in std_logic;
-    run_in : in std_logic; -- Start and input data validation
-    inputs : in std_logic_vector(Nbit-1 downto 0); -- Input data
-    run_out : out std_logic; -- Output data validation, run_in for the next layer
-    outputs : out std_logic_vector(Nbit-1 downto 0) -- Output data
-  );
-end af_sigmoid;
-
-
-architecture Behavioral of af_sigmoid is
-
-  -- Definition of internal modules, constants, signals, etc...
-
-  -- Sigmoid parameters:
-  constant f0 : real := 1.0; -- Slope at the origin
-  constant fr : real := 2.0; -- fr = fmax - fmin
-
-  signal dataIn: integer range (2**Nbit-1) downto 0; -- To convert std_logic_vector input to integer index for the LUT
-  type table_t is array(0 to (2**Nbit)-1) of std_logic_vector(Nbit-1 downto 0); -- LUT type
-
--- Function Sigmoidal: generates the Look-Up-Table for the sigmoid activation function:
--- margin: maximun value of x.
-  function Sigmoidal(margin:real;Nbit:natural) return table_t is
-    variable scale,x,y,w,t: real;
-    variable u: integer;
-    variable fbits: std_logic_vector(Nbit-1 downto 0);
-    variable table: table_t;
-  begin
-    scale := (2.0*margin)/(2.0**Nbit); -- Calculates gap between to points
-    x := -margin;
-    for idx in -(2**(Nbit-1)) to (2**(Nbit-1))-1 loop
-      y := ( fr / (1.0+exp(((-4.0*f0)/fr)*x)) ) - (fr/2.0);
-      w := y*(2.0**(Nbit-1)); -- Shifts bits to the left
-      t := round(w);
-      u := integer(t);
-      fbits := std_logic_vector(to_signed(u,Nbit));
-      table(to_integer(to_unsigned(idx+(2**Nbit),Nbit))):= fbits;
-      x := x+scale;
-    end loop;
-    return table;
-  end Sigmoidal;
-  signal Table: table_t := Sigmoidal(1.0,Nbit); -- Generation of the LUT (at synthesis time)
-
-begin
-
-  -- Description of the activation function
-  dataIn <= to_integer(unsigned(inputs));
-
-  Activation: process(clk,reset)
-  begin
-    if clk'event and clk = '1' then
-      if reset = '1' then
-        run_out <= '0';
-        outputs <= (others => '0');
-      else
-        if run_in = '1' then
-          run_out <='1';
-          outputs <=Table(dataIn); -- Assigns output value from the LUT
-        else
-          run_out <='0';
-        end if;
-      end if;
-    end if;
-  end process;
-end Behavioral;
+----------------------------------------------------------------------------------
+-- Company: CEI
+-- Engineer: Enrique Herrero
+--
+-- Create Date:
+-- Design Name: Configurable ANN
+-- Module Name: af_sigmoid - Behavioral
+-- Project Name:
+-- Target Devices:
+-- Tool versions:
+-- Description: Sigmoid activation function implemented as a Look-Up-Table (LUT).
+--
+-- Dependencies:
+--
+-- Revision:
+-- Revision 0.01 - File Created
+-- Revision 1 - David Aledo
+-- Additional Comments:
+--
+----------------------------------------------------------------------------------
+library IEEE;
+use IEEE.STD_LOGIC_1164.ALL;
+use ieee.numeric_std.ALL;
+use ieee.math_real.all;
+
+
+entity af_sigmoid is
+  generic
+  (
+    Nbit : natural := 8
+  );
+  port
+  (
+    reset : in std_logic;
+    clk : in std_logic;
+    run_in : in std_logic; -- Start and input data validation
+    inputs : in std_logic_vector(Nbit-1 downto 0); -- Input data
+    run_out : out std_logic; -- Output data validation, run_in for the next layer
+    outputs : out std_logic_vector(Nbit-1 downto 0) -- Output data
+  );
+end af_sigmoid;
+
+
+architecture Behavioral of af_sigmoid is
+
+  -- Definition of internal modules, constants, signals, etc...
+
+  -- Sigmoid parameters:
+  constant f0 : real := 2.0; -- Slope at the origin
+  constant fr : real := 2.0; -- fr = fmax - fmin
+
+  signal dataIn: integer range (2**Nbit-1) downto 0; -- To convert std_logic_vector input to integer index for the LUT
+  type table_t is array(0 to (2**Nbit)-1) of std_logic_vector(Nbit-1 downto 0); -- LUT type
+
+-- Function Sigmoidal: generates the Look-Up-Table for the sigmoid activation function:
+-- margin: maximun value of x.
+  function Sigmoidal(margin:real;Nbit:natural) return table_t is
+    variable scale,x,y,w,t: real;
+    variable u: integer;
+    variable fbits: std_logic_vector(Nbit-1 downto 0);
+    variable table: table_t;
+  begin
+    scale := (2.0*margin)/(2.0**Nbit); -- Calculates gap between to points
+    x := -margin;
+    for idx in -(2**(Nbit-1)) to (2**(Nbit-1))-1 loop
+      y := (fr/(1.0+exp(((-4.0*f0)/fr)*x)))-(fr/2.0);
+      w := y*(2.0**(Nbit-1)); -- Shifts bits to the left
+      t := round(w);
+      u := integer(t);
+      fbits := std_logic_vector(to_signed(u,Nbit));
+      table(to_integer(to_unsigned(idx+(2**Nbit),Nbit))):= fbits;
+      x := x+scale;
+    end loop;
+    return table;
+  end Sigmoidal;
+  signal Table: table_t := Sigmoidal(1.0,Nbit); -- Generation of the LUT (at synthesis time)
+
+begin
+
+  -- Description of the activation function
+  dataIn <= to_integer(signed(inputs));
+
+  Activation: process(clk,reset)
+  begin
+    if clk'event and clk = '1' then
+      if reset = '1' then
+        run_out <= '0';
+        outputs <= (others => '0');
+      else
+        if run_in = '1' then
+          run_out <='1';
+          outputs <=Table(dataIn); -- Assigns output value from the LUT
+        else
+          run_out <='0';
+        end if;
+      end if;
+    end if;
+  end process;
+end Behavioral;
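The af_sigmoid diff above changes the slope constant f0 (1.0 at revision 8, 2.0 at revision 7) and the LUT indexing (unsigned at revision 8, signed at revision 7). An Octave model of the Sigmoidal() table generator, useful for experimenting with these parameters offline (a sketch mirroring the VHDL, not project code):

    % Octave model of the VHDL Sigmoidal() LUT generator.
    function table = sigmoid_lut(margin, Nbit, f0, fr)
      scale = (2.0*margin) / 2^Nbit;        % gap between table points
      x = -margin + (0:2^Nbit-1)*scale;     % idx = -2^(Nbit-1) .. 2^(Nbit-1)-1
      y = fr ./ (1.0 + exp((-4.0*f0/fr).*x)) - fr/2.0;
      table = round(y * 2^(Nbit-1));        % signed fixed point, as in the VHDL
    end
    % e.g. max(abs(sigmoid_lut(1.0, 8, 2.0, 2.0))) stays inside the 8-bit signed range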
Index: ANN_kernel/RTL_VHDL_files/layerPS_top.vhd
===================================================================
--- ANN_kernel/RTL_VHDL_files/layerPS_top.vhd (revision 8)
+++ ANN_kernel/RTL_VHDL_files/layerPS_top.vhd (revision 7)
@@ -22,9 +22,6 @@
 use IEEE.STD_LOGIC_1164.ALL;
 use ieee.numeric_std.all;
 
-library work;
-use work.wb_init.all; -- initialization package, comment out when not used
-
 -- Deprecated XPS library:
 --library proc_common_v3_00_a;
 --use proc_common_v3_00_a.proc_common_pkg.all; -- Only for simulation ( pad_power2() )
@@ -33,17 +30,15 @@
 generic
 (
-  WBinit : boolean := false;
-  LNum : natural := 0; ------- layer number (needed for initialization)
-  NumN : natural := 34; ------- Number of neurons of the layer
-  NumIn : natural := 27; ------- Number of inputs of each neuron
-  NbitIn : natural := 8; ------- Bit width of the input data
-  NbitW : natural := 1; ------- Bit width of weights and biases
+  NumN : natural := 64; ------- Number of neurons of the layer
+  NumIn : natural := 8; ------- Number of inputs of each neuron
+  NbitIn : natural := 12; ------- Bit width of the input data
+  NbitW : natural := 8; ------- Bit width of weights and biases
   NbitOut : natural := 8; ------- Bit width of the output data
-  lra_l : natural := 11; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn)
-  wra_l : natural := 5; ------- Weight RAM address length. It should value log2(NumIn)
+  lra_l : natural := 10; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn)
+  wra_l : natural := 3; ------- Weight RAM address length. It should value log2(NumIn)
   bra_l : natural := 6; ------- Bias RAM address length. It should value log2(NumN)
-  LSbit : natural := 6 ------- Less significant bit of the outputs
+  LSbit : natural := 4 ------- Less significant bit of the outputs
 )
 
 port
@@ -69,44 +64,14 @@
 
 architecture Behavioral of layerPS_top is
 
+  --type ramd_type is array (pad_power2(NumN)-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces -- pad_power2() only for simulation
+  --type layer_ram is array (pad_power2(NumIn)-1 downto 0) of ramd_type;
   type ramd_type is array (NumN-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces
   type layer_ram is array (NumIn-1 downto 0) of ramd_type;
   type outm_type is array (NumIn-1 downto 0) of std_logic_vector(NbitW-1 downto 0);
 
-  function fw_init(LNum : natural) return layer_ram is
-    variable tmp_arr : layer_ram := (others =>(others => (others => '0')));
-  begin
-    if WBinit = true then
-      for i in 0 to NumIn-1 loop
-        for j in 0 to NumN-1 loop
-          tmp_arr(i)(j) := w_init(LNum)(i)(j);
-        end loop;
-      end loop;
-    end if;
-    return tmp_arr ;
-  end fw_init;
-
-
-
-
-  function fb_init(LNum : natural) return ramd_type is
-    variable tmp_arr : ramd_type := (others => (others => '0')) ;
-  begin
-    if WBinit = true then
-      for i in 0 to NumN-1 loop
-        tmp_arr(i) := b_init(LNum)(i);
-      end loop;
-    end if;
-    return tmp_arr;
-  end fb_init;
-
-  --function fb_init(LNum : natural) return ramd_type is
-  --begin
-  --  return ramd_type(b_init(LNum));
-  --end fb_init;
-
-  signal lram : layer_ram := fw_init(LNum); -- Layer RAM. One RAM per input. It stores the weights
-  signal breg : ramd_type := fb_init(LNum); -- Bias RAM. They can be RAM because they are not accessed simultaneously
+  signal lram : layer_ram; -- Layer RAM. One RAM per input. It stores the weights
+  signal breg : ramd_type; -- Bias RAM. They can be RAM because they are not accessed simultaneously
   signal outm : outm_type; -- RAM outputs to be multiplexed into rdata
   signal m_sel : std_logic_vector(NumIn-1 downto 0); --------- RAM select
   signal Wyb : std_logic_vector((NbitW*NumIn)-1 downto 0); -- Weight vectors
@@ -114,7 +79,7 @@
   signal Nouts : std_logic_vector(NbitOut-1 downto 0); ------ Outputs from neurons
   signal uaddr : unsigned(lra_l-1 downto 0); -- Unsigned address of weight and bias memories
 
-  -- Señales de control
+  -- Señales de control
   signal cont : integer range 0 to NumN-1; -- Neuron counter
   signal cntb : integer range 0 to NumN-1; -- Delayed counter for biases
   signal st : bit; ------- State
@@ -123,9 +88,6 @@
   signal en3 : std_logic; -- Shift register enable
   signal en_out : std_logic;
 
-  signal input_aux1 : std_logic_vector((NbitIn*NumIn)-1 downto 0);
-  signal input_aux2 : std_logic_vector((NbitIn*NumIn)-1 downto 0);
-  signal input_aux3 : std_logic_vector((NbitIn*NumIn)-1 downto 0);
 begin
 
   layerPS_inst: entity work.layerPS
@@ -146,7 +108,7 @@
     en => en1,
     en2 => en2,
     en_r => en3,
-    inputs => input_aux2,
+    inputs => inputs,
     Wyb => Wyb,
     bias => bias,
@@ -265,10 +227,6 @@
       en2 <= '0';
      run_out <= '0';
     else
-      input_aux1 <= inputs;
-      input_aux2 <= input_aux1;
-      --input_aux3 <=input_aux3 input_aux2;
-
      cntb <= cont; -- Bias counter is delayed to assure correctness of pipeline data
      case st is
        when '0' =>
@@ -280,12 +238,13 @@
      end case;
    when '1' =>
      en1 <= '1'; -- en1 is delayed 1 cycle in order to insert a register for Wyb
-      if cont = NumN-1 then
-        cont <= 0;
-        st <= '0';
-      else
-        cont <= cont +1;
-      end if;
+      case cont is
+        when (NumN-1) =>
+          cont <= 0;
+          st <= '0';
+        when others =>
+          cont <= cont +1;
+      end case;
   end case;
  en2 <= en1;
/ANN_kernel/RTL_VHDL_files/layerSP_top.vhd
22,9 → 22,6
use IEEE.STD_LOGIC_1164.ALL;
use ieee.numeric_std.all;
 
library work;
use work.wb_init.all; -- initialization package, comment out when not used
 
-- Deprecated XPS library:
--library proc_common_v3_00_a;
--use proc_common_v3_00_a.proc_common_pkg.all; -- Only for simulation ( pad_power2() )
33,17 → 30,15
 
generic
(
WBinit : boolean := false;
LNum : natural := 0; ------- layer number (needed for initialization)
NumN : natural := 34; ------- Number of neurons of the layer
NumIn : natural := 27; ------- Number of inputs of each neuron
NumN : natural := 8; ------- Number of neurons of the layer
NumIn : natural := 64; ------- Number of inputs of each neuron
NbitIn : natural := 8; ------- Bit width of the input data
NbitW : natural := 32; ------- Bit width of weights and biases
NbitOut : natural := 8; ------- Bit width of the output data
lra_l : natural := 11; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn)
wra_l : natural := 5; ------- Weight RAM address length. It should value log2(NumIn)
bra_l : natural := 6; ------- Bias RAM address length. It should value log2(NumN)
LSbit : natural := 6 ------- Less significant bit of the outputs
NbitW : natural := 8; ------- Bit width of weights and biases
NbitOut : natural := 12; ------- Bit width of the output data
lra_l : natural := 10; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn)
wra_l : natural := 6; ------- Weight RAM address length. It should value log2(NumIn)
bra_l : natural := 3; ------- Bias RAM address length. It should value log2(NumN)
LSbit : natural := 4 ------- Less significant bit of the outputs
);
 
port
69,39 → 64,14
 
architecture Behavioral of layerSP_top is
 
--type ramd_type is array (pad_power2(NumIn)-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces
--type layer_ram is array (pad_power2(NumN)-1 downto 0) of ramd_type;
type ramd_type is array (NumIn-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces
type layer_ram is array (NumN-1 downto 0) of ramd_type;
type outm_type is array (NumN-1 downto 0) of std_logic_vector(NbitW-1 downto 0);
 
function fw_init(LNum : natural) return layer_ram is
variable tmp_arr : layer_ram := (others => (others => (others => '0'))) ;
begin
if WBinit = true then
for i in 0 to NumIn-1 loop
for j in 0 to NumN-1 loop
tmp_arr(j)(i) := w_init(LNum)(i)(j);
end loop;
end loop;
end if;
return tmp_arr ;
end fw_init;
 
function fb_init(LNum : natural) return outm_type is
variable tmp_arr : outm_type := (others => (others => '0')) ;
begin
if WBinit = true then
for i in 0 to NumN-1 loop
tmp_arr(i) := b_init(LNum)(i);
end loop;
end if;
return tmp_arr;
end fb_init;
 
 
 
signal lram : layer_ram := fw_init(LNum); -- Layer RAM. One RAM per neuron. It stores the weights
signal breg : outm_type := fb_init(LNum); -- Bias registers. They can not be RAM because they are accessed simultaneously
signal lram : layer_ram; -- Layer RAM. One RAM per neuron. It stores the weights
signal breg : outm_type; -- Bias registers. They can not be RAM because they are accessed simultaneously
signal outm : outm_type; -- RAM outputs to be multiplexed into rdata
signal m_sel : std_logic_vector(NumN-1 downto 0); -------- RAM select
signal Wyb : std_logic_vector((NbitW*NumN)-1 downto 0); --- Weight vectors
196,14 → 166,7
end if;
end if;
end process;
outm(i) <= lram(i)(to_integer(uaddr(wra_l-1 downto 0))) when (uaddr(wra_l-1 downto 0) <= NumIn-1) else
(others => '0') ; -- Read all RAM
-- In my case the first layer has 27 inputs and 34 neurons. When I address
-- the first layer's inputs for the second neuron, the layer that accepts a 6-bit wide
-- input address (layer 2) sees the number ..1 00100 (34) and interprets it as an input
-- address (which only goes up to 33), hence the bound-check failure.
-- Fix: the assignment is changed to a conditional one that checks that we are not
-- trying to read the weight of an input higher than this layer's number of inputs.
outm(i) <= lram(i)(to_integer(uaddr(wra_l-1 downto 0))); -- Read all RAM
end generate;
 
-- Synchronous read including breg:
210,8 → 173,6
process (clk)
begin
if (clk'event and clk = '1') then
--report "addr: " & integer'image(wra_l-1);
--report "addr: " & integer'image(to_integer(uaddr(wra_l-1 downto 0)) );
if (m_en = '1') then
if (b_sel = '1') then
rdata <= breg(to_integer(uaddr(bra_l-1 downto 0))); -- Bias registers selected
285,9 → 246,6
else
cont <= cont +1;
end if;
--elsif (cont = NumIn-1) then -- for layers with more than
-- cont <= 0; -- one neuron, uncommenting this
-- aux2_en3 <= '1'; -- solved a problem with cont resetting
end if;
en2 <= en1;
if (cont = 0 and run_in = '1') then
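The guarded read above can also be packaged as a helper function. The following is a minimal sketch under the same declarations (ramd_type, NumIn and NbitW from this architecture), not code from the repository; safe_weight is a hypothetical name:

    function safe_weight(mem : ramd_type; addr : unsigned) return std_logic_vector is
      variable zeros : std_logic_vector(NbitW-1 downto 0) := (others => '0');
    begin
      if to_integer(addr) <= NumIn-1 then
        return mem(to_integer(addr)); -- in range: normal weight read
      else
        return zeros; -- out of range: benign zeros instead of a bound violation
      end if;
    end safe_weight;

    -- usage, equivalent to the conditional signal assignment above:
    -- outm(i) <= safe_weight(lram(i), uaddr(wra_l-1 downto 0));

With the 27-input sizing mentioned in the comment, the 5-bit input field of the shared address bus can momentarily carry the values 27..31, which is exactly the case the guard absorbs.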
/ANN_kernel/RTL_VHDL_files/af_sigmoid2.vhd
28,8 → 28,7
entity af_sigmoid2 is
generic
(
Nbit : natural := 8;
lsbit : natural := 10
Nbit : natural := 8
);
port
(
48,7 → 47,7
-- Definition of internal modules, constants, signals, etc...
 
-- Sigmoid parameters:
constant f0 : real := 1.0; -- Slope at the origin
constant f0 : real := 0.5; -- Slope at the origin
constant fr : real := 2.0; -- fr = fmax - fmin
 
signal dataIn: integer range (2**Nbit-1) downto 0; -- To convert std_logic_vector input to integer index for the LUT
55,8 → 54,8
type table_t is array(0 to (2**Nbit)-1) of std_logic_vector(Nbit-1 downto 0); -- LUT type
 
-- Function Sigmoidal: generates the Look-Up-Table for the sigmoid activation function:
-- margin: maximum value of input
function Sigmoidal(margin:real;Nbit:natural;lsbit:natural) return table_t is
-- margin: maximum value of x.
function Sigmoidal(margin:real;Nbit:natural) return table_t is
variable scale,x,y,w,t: real;
variable u: integer;
variable fbits: std_logic_vector(Nbit-1 downto 0);
63,10 → 62,10
variable table: table_t;
begin
scale := (2.0*margin)/(2.0**Nbit); -- Calculates the gap between two points
x := -margin;
x := -margin;
for idx in -(2**(Nbit-1)) to (2**(Nbit-1))-1 loop
y := ( fr / (1.0+exp(((-4.0*f0)/fr)*x)) ) - (fr/2.0);
w := y*(2.0**(lsbit)); -- Shifts bits to the left
w := y*(2.0**(Nbit-1)); -- Shifts bits to the left
t := round(w);
u := integer(t);
fbits := std_logic_vector(to_signed(u,Nbit));
75,12 → 74,12
end loop;
return table;
end Sigmoidal;
signal Table: table_t := Sigmoidal(2.0**(Nbit-lsbit-1),Nbit,lsbit); -- Generation of the LUT (at synthesis time)
signal Table: table_t := Sigmoidal(1.0,Nbit); -- Generation of the LUT (at synthesis time)
 
begin
 
-- Description of the activation function
dataIn <= to_integer(unsigned(inputs));
dataIn <= to_integer(signed(inputs));
 
Activacion: process(clk,reset)
begin
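For reference, the "slope at the origin" comment can be checked directly from the LUT formula, which is the same expression in both revisions:

    y(x)  = fr/(1 + exp(-(4*f0/fr)*x)) - fr/2
    y'(x) = (4*f0) * exp(-(4*f0/fr)*x) / (1 + exp(-(4*f0/fr)*x))**2
    y'(0) = (4*f0) / 4 = f0,    y(0) = fr/2 - fr/2 = 0

so the curve passes through the origin with slope f0 and saturates at -fr/2 and +fr/2, consistent with fr = fmax - fmin.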
/ANN_kernel/RTL_VHDL_files/activation_function.vhd
25,7 → 25,6
entity activation_function is
generic
(
lsbit : natural := 10;
f_type : string := "linear"; -- Activation function type
Nbit : natural := 8 -- Bit width
);
42,22 → 41,6
 
architecture Structural of activation_function is
 
component af_sigmoid is
generic
(
Nbit : natural := 8
);
port
(
reset : in std_logic;
clk : in std_logic;
run_in : in std_logic; -- Start and input data validation
inputs : in std_logic_vector(Nbit-1 downto 0); -- Input data
run_out : out std_logic; -- Output data validation, run_in for the next layer
outputs : out std_logic_vector(Nbit-1 downto 0) -- Output data
);
end component;
 
begin
 
-- Linear activation function. It is a direct assignment:
70,7 → 53,7
-- Example 1: sigmoid activation function implemented as a Look-Up-Table (LUT):
Sigmoid_f:
if (f_type = "siglut") generate
siglut_inst: af_sigmoid
siglut_inst: entity work.af_sigmoid
generic map
(
Nbit => Nbit
92,8 → 75,7
siglut_inst: entity work.af_sigmoid2
generic map
(
Nbit => Nbit,
lsbit => lsbit
Nbit => Nbit
)
port map
(
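For orientation, a typical instantiation of the revision 8 entity would look like the sketch below. This is an assumption-laden sketch, not repository code: the port names are copied from the af_sigmoid component above and assumed to match activation_function's own port list, "siglut2" is assumed to be the f_type string that selects af_sigmoid2 (only "siglut" and "linear" are visible in this diff), and neuron_out / act_out are hypothetical signals:

    af_inst: entity work.activation_function
      generic map
      (
        f_type => "siglut2", -- assumed selector string for the af_sigmoid2 LUT
        Nbit   => 8,
        lsbit  => 10
      )
      port map
      (
        reset   => reset,
        clk     => clk,
        run_in  => run_in,
        inputs  => neuron_out,
        run_out => run_out,
        outputs => act_out
      );

Because f_type is tested in generate statements, only the selected activation function is elaborated into hardware.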
/ANN_kernel/RTL_VHDL_files/adder_tree.vhd
46,29 → 46,6
 
architecture Behavioral of adder_tree is
 
 
 
component adder_tree is
generic
(
NumIn : integer := 9; -- Number of inputs
Nbit : integer := 12 -- Bit width of the input data
);
 
port
(
-- Input ports
reset : in std_logic;
clk : in std_logic;
en : in std_logic; -- Enable
inputs : in std_logic_vector((Nbit*NumIn)-1 downto 0); -- Input data
 
-- Output ports
en_out : out std_logic; -- Output enable (output data validation)
output : out std_logic_vector(Nbit-1 downto 0) -- Output of the tree adder
);
end component;
 
constant NumIn2 : integer := NumIn/2; -- Number of inputs of the next adder tree layer
 
signal next_en : std_logic := '0'; -- Next adder tree layer enable
130,7 → 107,7
recursion:
if (NumIn > 2) generate
 
sub_adder_tree: adder_tree
sub_adder_tree: entity work.adder_tree
generic map
(
NumIn => (NumIn2)+(NumIn mod 2),
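The recursion halves the input count at each level, carrying any odd leftover input through to the next level: the sub-tree receives NumIn2 + (NumIn mod 2) inputs. Worked out for the default NumIn = 9:

    level 0: 9 inputs -> 4 adders + 1 passthrough -> 5 values
    level 1: 5 inputs -> 2 adders + 1 passthrough -> 3 values
    level 2: 3 inputs -> 1 adder  + 1 passthrough -> 2 values
    level 3: 2 inputs -> base case, a single adder -> 1 output

so a 9-input tree elaborates ceil(log2(9)) = 4 adder levels.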
