Resource description
An RNN implementation of an autoencoder for end-to-end communication
# -*- coding: utf-8 -*-
"""
Created on Tue May 1 10:37:44 2018
@author: User
"""
# importing libs
import numpy as np
import tensorflow as tf
import keras
from keras.layers import Input, Dense,TimeDistributed,GaussianNoise,Lambda,Dropout
from keras.layers.recurrent import LSTM,SimpleRNN
from keras.models import Model,Sequential
from keras import regularizers
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam,SGD
from keras import backend as K
# importing libs end
# for reproducible results
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(3)
M = 4
k = np.log2(M)  # bits per symbol for a four-symbol source
k = int(k)
n_channel = 2
R = k/n_channel
print ('M:',M,'k:',k,'n:',n_channel,'R:',R)
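# With these parameters R = k/n_channel = 1 bit per channel use; the AWGN noise
# standard deviation used below follows sigma = sqrt(1/(2*R*EbNo)), e.g. at
# 7 dB, EbNo = 10**(7/10) ≈ 5.01187 and sigma ≈ 0.316.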
#generating data of size N
N = 32000
label = np.random.randint(M, size=N)  # N random integers in [0, M)
# creating one hot encoded vectors
data = []
for i in label:
    temp = np.zeros(M)
    temp[i] = 1
    data.append(temp)
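# equivalent one-liner with Keras' built-in helper:
# data = keras.utils.to_categorical(label, num_classes=M)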
#
## checking data shape
#data = np.array(data)
#print(data.shape)
#time_steps = 2
#BATCH_SIZE = 20
#data = data.reshape(int(N/time_steps), time_steps, 4)
#m = Sequential()
#m.add(SimpleRNN(output_dim=4, input_dim=4, batch_size=BATCH_SIZE,
#                return_sequences=True,
#                stateful=True))
#m.add(TimeDistributed(Dense(4, activation='relu')))
#m.add(TimeDistributed(Dense(2, activation='linear')))
#m.add(Lambda(lambda x: np.sqrt(n_channel)*tf.nn.l2_normalize(x, axis=-1)))
#EbNo_train = 5.01187  # 7 dB of Eb/N0 converted to linear scale
##m.add(TimeDistributed(GaussianNoise(np.sqrt(1/(2*R*EbNo_train)))))
#m.add(GaussianNoise(np.sqrt(1/(2*R*EbNo_train))))
#m.add(TimeDistributed(Dense(4, activation='relu')))
#m.add(SimpleRNN(output_dim=4, input_dim=4, batch_size=2,
#                return_sequences=True,
#                stateful=True))
#m.add(TimeDistributed(Dense(4, activation='softmax')))
#adam = Adam(lr=0.001)
#print(m.summary())
#m.compile(loss='categorical_crossentropy', optimizer=adam)
#m.fit(data, data, epochs=45, batch_size=BATCH_SIZE)
#print(m.summary())
#
#
#from keras.models import load_model
## to save the model, uncomment the line below
## autoencoder.save('autoencoder_v_best.model')
#
## making the encoder from the full autoencoder
#input_signal = Input(shape=(BATCH_SIZE, M))
#encoder = m.layers[0](input_signal)
#
## making the decoder from the full autoencoder
#encoded_input = Input(shape=(n_channel,))
#
#deco = autoencoder.layers[-2](encoded_input)
#deco = autoencoder.layers[-1](deco)
#decoder = Model(encoded_input, deco)
#
# checking data shape
data = np.array(data)
print(data.shape) #(32000, 4)
time_steps = 4
BATCH_SIZE = 32
data = data.reshape(int(N/time_steps),time_steps,4) #N=32000, time_steps = 4 ,(8000, 4, 4)
print("data = data.reshape(int(N/time_steps),time_steps,4") #(8000, 4, 4)
print(data.shape) #(8000, 4, 4)
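# sanity check: the reshape above requires N to be a multiple of time_steps
assert N % time_steps == 0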
#input_signal = Input(shape=(M,), batch_shape=20)
#encoded = SimpleRNN(output_dim=4, input_dim=4, batch_size=BATCH_SIZE,
#                    return_sequences=True,
#                    stateful=True)(input_signal)
#encoded1 = TimeDistributed(Dense(2, activation='linear'))(encoded)
#encoded2 = Lambda(lambda x: np.sqrt(n_channel)*tf.nn.l2_normalize(x, axis=-1))(encoded1)
#EbNo_train = 5.01187  # 7 dB of Eb/N0 converted to linear scale
#encoded3 = GaussianNoise(np.sqrt(1/(2*R*EbNo_train)))(encoded2)
## pass through the AWGN channel
#decoded = SimpleRNN(output_dim=4, input_dim=4, batch_size=BATCH_SIZE,
#                    return_sequences=True,
#                    stateful=True)(encoded3)
#decoded1 = TimeDistributed(Dense(4, activation='softmax'))(decoded)
#adam = Adam(lr=0.001)
#autoencoder = Model(input_signal, decoded1)
#autoencoder.compile(optimizer=adam, loss='categorical_crossentropy')
#print(autoencoder.summary())
#autoencoder.fit(data, data,
#                epochs=45,
#                batch_size=BATCH_SIZE)
k = 4    # the k of an (n, k) code in coding theory; note this overwrites the earlier k = log2(M)
com = 5  # encoder/decoder complexity (number of RNN hidden units)
autoencoder = Sequential()
encoded = SimpleRNN(com, input_shape=(time_steps, M), batch_size=BATCH_SIZE,
                    return_sequences=True, activation='relu',
                    # stateful=True,
                    )  # BATCH_SIZE = 32
autoencoder.add(encoded)  # layer 1: layers[0]
encoded1 = TimeDistributed(Dense(k, activation='linear'))  # com -> k units per time step (5 -> 4)
# model = Sequential()
# model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))
# The output is still 10 vectors, but each one's dimension drops from 16 to 8,
# i.e. (32, 10, 8): TimeDistributed applies the same Dense layer to each of the
# 10 vectors independently.
# model.add(TimeDistributed(Dense(32)))  # for this layer the input shape is already known
# now model.output_shape == (None, 10, 32)
# The output will then have shape `(32, 10, 32)`.
autoencoder.add(encoded1)  # layer 2: layers[1]
encoded2 = Lambda(lambda x: np.sqrt(n_channel)*tf.nn.l2_normalize(x, axis=-1))
# Signature: tf.nn.l2_normalize(x, axis=None, epsilon=1e-12, name=None, dim=None)
# Normalizes along the given axis using the L2 norm; axis=-1 enforces the
# transmit-power constraint on each time step's code vector.
autoencoder.add(encoded2)  # layer 3: layers[2]
EbNo_train = 5.01187  # 7 dB of Eb/N0 converted to linear scale
#m.add(TimeDistributed(GaussianNoise(np.sqrt(1/(2*R*EbNo_train)))))
encoded3 = GaussianNoise(np.sqrt(1/(2*R*EbNo_train)))
autoencoder.add(encoded3)  # layer 4: layers[3], the AWGN channel
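# Note: GaussianNoise is a regularization layer, so it is only active during
# training; at inference it is the identity, which is why the BER loop below
# adds channel noise manually.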
decoded = SimpleRNN(com, input_shape=(time_steps, k), batch_size=BATCH_SIZE,
                    return_sequences=True,
                    # stateful=True,
                    )
autoencoder.add(decoded)  # layer 5: layers[4]
decoded1 = TimeDistributed(Dense(4, activation='softmax'))
autoencoder.add(decoded1)  # layer 6: layers[5]
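# Resulting stack, per time step: one-hot symbol (4) -> SimpleRNN(com=5, relu)
# -> Dense(k=4, linear) -> power normalization -> AWGN noise -> SimpleRNN(com=5)
# -> Dense(4, softmax)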
adam = Adam(lr=0.001)
autoencoder.summary()
autoencoder.compile(loss='categorical_crossentropy', optimizer=adam)
autoencoder.fit(data, data, epochs=45, batch_size=BATCH_SIZE)
from keras.models import load_model
# to save the model, uncomment the line below
# autoencoder.save('autoencoder_v_best.model')
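# a saved model can be restored with load_model (imported above); note that the
# Lambda layer needs np and tf importable when deserializing:
# autoencoder = load_model('autoencoder_v_best.model')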
# building the encoder (transmitter) from the full autoencoder
encoder = Sequential()
encoder.add(autoencoder.layers[0])
encoder.add(autoencoder.layers[1])
encoder.add(autoencoder.layers[2])
# building the decoder (receiver) from the full autoencoder
decoder = Sequential()
decoder.add(autoencoder.layers[4])
decoder.add(autoencoder.layers[5])
# generating data for checking BER
# if you're not using t-SNE for visualisation, set N to 70,000 for better results
# for t-SNE use a smaller N, e.g. N = 1500
N = 32000
test_label = np.random.randint(M,size=N)
test_data = []
for i in test_label:
    temp = np.zeros(M)
    temp[i] = 1
    test_data.append(temp)
test_data = np.array(test_data)
test_data = test_data.reshape(int(N/time_steps),time_steps,4) #(8000, 4, 4)
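# optional sanity check: with GaussianNoise inactive at inference, the split
# models chained together should reproduce the full autoencoder, e.g.
# assert np.allclose(decoder.predict(encoder.predict(test_data)),
#                    autoencoder.predict(test_data), atol=1e-5)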
def frange(x, y, jump):
    while x < y:
        yield x
        x += jump
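# equivalent NumPy one-liner, if preferred:
# EbNodB_range = list(np.arange(-4, 8.5, 0.5))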
# calculating BER
# this is an optimized BER routine, so it can handle a large N
# (an earlier version had an extra for loop that made it slow)
EbNodB_range = list(frange(-4, 8.5, 0.5))
ber = [None]*len(EbNodB_range)
for n in range(0, len(EbNodB_range)):
    EbNo = 10.0**(EbNodB_range[n]/10.0)
    noise_std = np.sqrt(1/(2*R*EbNo))
    noise_mean = 0
    no_errors = 0
    nn = N
    noise = noise_std * np.random.randn(nn, k)  # (nn, k) array: (32000, 4)
    noise = noise.reshape(int(N/time_steps), time_steps, k)  # (8000, 4, 4)
    encoded_signal = encoder.predict(test_data)  # (8000, 4, 4)
    final_signal = encoded_signal + noise  # (8000, 4, 4)
    pred_final_signal = decoder.predict(final_signal)  # (8000, 4, 4)
    # print("pred_final_signal", pred_final_signal.shape)  # (8000, 4, 4)
    pred_final_signal = pred_final_signal.reshape(N, 4)  # (32000, 4)
    pred_output = np.argmax(pred_final_signal, axis=1)  # index of the maximum in each row
    no_errors = (pred_output != test_label)
    no_errors = no_errors.astype(int).sum()
    ber[n] = no_errors / nn
    print('Eb/N0 (dB):', EbNodB_range[n], 'BER:', ber[n])
    # use the line below instead to print a MATLAB-style vector that can be
    # copy-pasted for plotting the BER curve in MATLAB
    #print(ber[n], " ", end='')
# plotting the BER curve
import matplotlib.pyplot as plt
plt.plot(EbNodB_range, ber, 'bo', label='Autoencoder(2,2)')
plt.yscale('log')
plt.xlabel('Eb/N0 (dB)')
plt.ylabel('Block Error Rate')
plt.grid()
plt.legend(loc='upper right', ncol=1)
# to save the figure, uncomment the line below
#plt.savefig('AutoEncoder_2_2_constrained_BER_matplotlib')
plt.show()