Using an RNN to implement the autoencoder of a communication system

# -*- coding: utf-8 -*-
"""
Created on Tue May  1 10:37:44 2018

@author: User
"""

# importing libs
import numpy as np
import tensorflow as tf
import keras
from keras.layers import Input, Dense, TimeDistributed, GaussianNoise, Lambda, Dropout
from keras.layers.recurrent import LSTM, SimpleRNN
from keras.models import Model, Sequential
from keras import regularizers
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam, SGD
from keras import backend as K
# importing libs end

# for reproducible results
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(3)
M = 4
k = np.log2(M)  # four-symbol source
k = int(k)
n_channel = 2
R = k / n_channel
print('M:', M, 'k:', k, 'n:', n_channel, 'R:', R)

# generating data of size N
N = 32000
label = np.random.randint(M, size=N)  # N random values in the interval [0, M)

# creating one-hot encoded vectors
data = []
for i in label:
    temp = np.zeros(M)
    temp[i] = 1
    data.append(temp)
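# A vectorized alternative to the loop above (an added note, not in the
# original): indexing an identity matrix by the labels produces the same
# one-hot rows in a single step.
# data = np.eye(M)[label]  # shape (N, M)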
# --- earlier Sequential draft (kept commented out in the original) ---
# # checking data shape
# data = np.array(data)
# print(data.shape)
# time_steps = 2
# BATCH_SIZE = 20
# data = data.reshape(int(N/time_steps), time_steps, 4)
# m = Sequential()
# m.add(SimpleRNN(output_dim=4, input_dim=4, batch_size=BATCH_SIZE,
#                 return_sequences=True,
#                 stateful=True))
# m.add(TimeDistributed(Dense(4, activation='relu')))
# m.add(TimeDistributed(Dense(2, activation='linear')))
# m.add(Lambda(lambda x: np.sqrt(n_channel)*tf.nn.l2_normalize(x, dim=1)))
# EbNo_train = 5.01187  # converted 7 dB of EbNo
# m.add(TimeDistributed(GaussianNoise(np.sqrt(1/(2*R*EbNo_train)))))
# m.add(GaussianNoise(np.sqrt(1/(2*R*EbNo_train))))
# m.add(TimeDistributed(Dense(4, activation='relu')))
# m.add(SimpleRNN(output_dim=4, input_dim=4, batch_size=2,
#                 return_sequences=True,
#                 stateful=True))
# m.add(TimeDistributed(Dense(4, activation='softmax')))
# adam = Adam(lr=0.001)
# print(m.summary())
# m.compile(loss='categorical_crossentropy', optimizer=adam)
# m.fit(data, data, nb_epoch=45, batch_size=BATCH_SIZE)
# print(m.summary())

# from keras.models import load_model
# if you want to save the model then remove the comment below
# autoencoder.save('autoencoder_v_best.model')

# making encoder from full autoencoder
# input_signal = Input(shape=(BATCH_SIZE, M))
# encoder = m.layers[0](input_signal)

# making decoder from full autoencoder
# encoded_input = Input(shape=(n_channel,))
# deco = autoencoder.layers[-2](encoded_input)
# deco = autoencoder.layers[-1](deco)
# decoder = Model(encoded_input, deco)
# --- end of earlier draft ---
# checking data shape
data = np.array(data)
print(data.shape)  # (32000, 4)
time_steps = 4
BATCH_SIZE = 32
data = data.reshape(int(N / time_steps), time_steps, 4)  # N = 32000, time_steps = 4
print(data.shape)  # (8000, 4, 4)
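# Keras recurrent layers consume input of shape (batch, time_steps, features),
# so each training sample is now a sequence of time_steps one-hot symbols.
# A quick sanity check (an added sketch, not in the original):
assert data.shape == (N // time_steps, time_steps, M)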
# --- earlier functional-API draft (kept commented out in the original) ---
# input_signal = Input(shape=(M,), batch_shape=20)
# encoded = SimpleRNN(output_dim=4, input_dim=4, batch_size=BATCH_SIZE,
#                     return_sequences=True, stateful=True)(input_signal)
# encoded1 = TimeDistributed(Dense(2, activation='linear'))(encoded)
# encoded2 = Lambda(lambda x: np.sqrt(n_channel)*tf.nn.l2_normalize(x, dim=1))(encoded1)
# EbNo_train = 5.01187  # converted 7 dB of EbNo
# encoded3 = GaussianNoise(np.sqrt(1/(2*R*EbNo_train)))(encoded2)  # pass through the AWGN channel
# decoded = SimpleRNN(output_dim=4, input_dim=4, batch_size=BATCH_SIZE,
#                     return_sequences=True, stateful=True)(encoded3)
# decoded1 = TimeDistributed(Dense(4, activation='softmax'))(decoded)
# adam = Adam(lr=0.001)
# autoencoder = Model(input_signal, decoded1)
# autoencoder.compile(optimizer=adam, loss='categorical_crossentropy')
# print(autoencoder.summary())
# autoencoder.fit(data, data, epochs=45, batch_size=BATCH_SIZE)
# --- end of earlier draft ---

k = 4    # the k of coding theory's (n, k)
com = 5  # encoder/decoder complexity (number of recurrent units)

autoencoder = Sequential()
encoded = SimpleRNN(com, input_shape=(time_steps, 4),
                    return_sequences=True, activation='relu')  # BATCH_SIZE = 32; stateful=True left disabled
autoencoder.add(encoded)  # first layer: layers[0]

encoded1 = TimeDistributed(Dense(k, activation='linear'))  # 5-2
# How TimeDistributed behaves:
#   model = Sequential()
#   model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))
# The output is still 10 vectors, but each one's dimension changes from 16 to 8,
# i.e. (32, 10, 8): TimeDistributed applies the same Dense layer to each of the
# 10 vectors, performing one Dense operation per time step.
#   model.add(TimeDistributed(Dense(32)))  # subsequent layer; input shape already known
# now model.output_shape == (None, 10, 32)
# The output will then have shape (32, 10, 32).
autoencoder.add(encoded1)  # second layer: layers[1]
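# A self-contained check of the TimeDistributed behaviour described above
# (an added sketch; the sizes 10/16/8/32 are illustrative and the name `demo`
# is hypothetical, not part of this model):
demo = Sequential()
demo.add(TimeDistributed(Dense(8), input_shape=(10, 16)))
print(demo.output_shape)  # (None, 10, 8): the same Dense is applied at every step
demo.add(TimeDistributed(Dense(32)))
print(demo.output_shape)  # (None, 10, 32)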
encoded2 = Lambda(lambda x: np.sqrt(n_channel) * tf.nn.l2_normalize(x, dim=1))
# Signature: tf.nn.l2_normalize(x, axis=None, epsilon=1e-12, name=None, dim=None)
# normalizes along the given axis using the L2 norm (energy normalization).
autoencoder.add(encoded2)  # third layer: layers[2] -- added (it was commented out)
                           # so the layers[0..5] indices used below line up

EbNo_train = 5.01187  # 7 dB of Eb/No converted to a linear ratio
# m.add(TimeDistributed(GaussianNoise(np.sqrt(1/(2*R*EbNo_train)))))
encoded3 = GaussianNoise(np.sqrt(1 / (2 * R * EbNo_train)))
autoencoder.add(encoded3)  # fourth layer: layers[3] (AWGN channel, training only)
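# Worked conversion for the constants above (an added sanity check):
#   EbNo_train = 10 ** (7 dB / 10) = 10 ** 0.7 ≈ 5.01187
# and with rate R = k/n_channel = 2/2 = 1, the training noise std is
#   sigma = sqrt(1 / (2 * R * EbNo_train)) = sqrt(1 / 10.0237) ≈ 0.3158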
decoded = SimpleRNN(com, return_sequences=True)  # stateful=True left disabled in the original
autoencoder.add(decoded)  # fifth layer: layers[4]

decoded1 = TimeDistributed(Dense(4, activation='softmax'))
autoencoder.add(decoded1)  # sixth layer: layers[5]

adam = Adam(lr=0.001)
autoencoder.summary()
autoencoder.compile(loss='categorical_crossentropy', optimizer=adam)
autoencoder.fit(data, data, epochs=45, batch_size=BATCH_SIZE)
autoencoder.summary()

from keras.models import load_model
# if you want to save the model then remove the comment below
# autoencoder.save('autoencoder_v_best.model')
# making encoder from full autoencoder (layers[0..2]: RNN -> Dense -> normalize)
encoder = Sequential()
encoder.add(autoencoder.layers[0])
encoder.add(autoencoder.layers[1])
encoder.add(autoencoder.layers[2])

# making decoder from full autoencoder (layers[4..5])
decoder = Sequential()
decoder.add(autoencoder.layers[4])
decoder.add(autoencoder.layers[5])
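# GaussianNoise (layers[3]) is skipped on purpose: it is only active during
# training, and the BER loop below injects channel noise explicitly.
# Quick shape check (an added sketch, not in the original):
print(encoder.predict(data[:BATCH_SIZE]).shape)  # expected (32, 4, 4)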
# generating data for checking BER
# if you're not using t-SNE for visualisation then set N to 70,000 for a better
# result; for t-SNE use a smaller N, e.g. N = 1500
N = 32000
test_label = np.random.randint(M, size=N)
test_data = []
for i in test_label:
    temp = np.zeros(M)
    temp[i] = 1
    test_data.append(temp)
test_data = np.array(test_data)
test_data = test_data.reshape(int(N / time_steps), time_steps, 4)  # (8000, 4, 4)

def frange(x, y, jump):
    while x < y:
        yield x
        x += jump

# calculating BER
# this is an optimized BER function, so it can handle a large N;
# the previous code had another for-loop, which made it slow
EbNodB_range = list(frange(-4, 8.5, 0.5))
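# Equivalent without the custom generator (an added note, not in the original):
# EbNodB_range = list(np.arange(-4, 8.5, 0.5))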
ber = [None] * len(EbNodB_range)
for n in range(0, len(EbNodB_range)):
    EbNo = 10.0 ** (EbNodB_range[n] / 10.0)
    noise_std = np.sqrt(1 / (2 * R * EbNo))
    noise_mean = 0
    no_errors = 0
    nn = N
    noise = noise_std * np.random.randn(nn, k)                 # nn*k array, (32000, 4)
    noise = noise.reshape(int(N / time_steps), time_steps, k)  # (8000, 4, 4)
    encoded_signal = encoder.predict(test_data)                # (8000, 4, 4)
    final_signal = encoded_signal + noise                      # (8000, 4, 4)
    pred_final_signal = decoder.predict(final_signal)          # (8000, 4, 4)
    # print(pred_final_signal)        # debug output; very verbose
    # print(pred_final_signal.shape)
    pred_final_signal = pred_final_signal.reshape(N, 4)        # (32000, 4)
    pred_output = np.argmax(pred_final_signal, axis=1)         # index of each row's maximum
    no_errors = (pred_output != test_label)
    no_errors = no_errors.astype(int).sum()
    ber[n] = no_errors / nn
    print('SNR:', EbNodB_range[n], 'BER:', ber[n])
    # use the line below to generate a MATLAB-like vector that can be copied
    # and pasted for plotting the BER graph in MATLAB
    # print(ber[n], ",", end='')

# plotting the BER curve
import matplotlib.pyplot as plt
from scipy import interpolate
plt.plot(EbNodB_range, ber, 'bo', label='Autoencoder(2,2)')
plt.yscale('log')
plt.xlabel('SNR Range')
plt.ylabel('Block Error Rate')
plt.grid()
plt.legend(loc='upper right', ncol=1)
# to save the figure, remove the comment below
# plt.savefig('AutoEncoder_2_2_constrained_BER_matplotlib')
plt.show()
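# Reference baseline (an addition, not in the original script): uncoded BPSK
# over AWGN has bit error rate Pb = Q(sqrt(2*Eb/No)) = 0.5*erfc(sqrt(Eb/No)).
# Shown here on its own figure; to overlay it on the autoencoder curve
# instead, issue the semilogy call before plt.show() above.
from scipy.special import erfc
EbNo_lin = 10.0 ** (np.array(EbNodB_range) / 10.0)
plt.figure()
plt.semilogy(EbNodB_range, 0.5 * erfc(np.sqrt(EbNo_lin)), 'k-', label='Uncoded BPSK (theory)')
plt.xlabel('SNR Range')
plt.ylabel('Bit Error Rate')
plt.grid()
plt.legend(loc='upper right')
plt.show()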