瀏覽代碼

first commit

Shellmiao 4 年之前
當前提交
8cc6ea9659

+ 1 - 0
Data/AttackTypes.csv

@@ -0,0 +1 @@
+apache2,dos
back,dos
buffer_overflow,u2r
ftp_write,r2l
guess_passwd,r2l
httptunnel,u2r
imap,r2l
ipsweep,probe
land,dos
loadmodule,u2r
mailbomb,dos
mscan,probe
multihop,r2l
named,r2l
neptune,dos
nmap,probe
perl,u2r
phf,r2l
pod,dos
portsweep,probe
processtable,dos
ps,u2r
rootkit,u2r
saint,probe
satan,probe
sendmail,r2l
smurf,dos
snmpguess,u2r
snmpgetattack,r2l
spy,r2l
sqlattack,u2r
teardrop,dos
udpstorm,dos
warezclient,r2l
warezmaster,r2l
worm,u2r
xlock,r2l
xsnoop,r2l
xterm,u2r
normal,normal
unknown,unknown

+ 151 - 0
Data/Preprocess.py

@@ -0,0 +1,151 @@
+import pandas as pd
+import numpy as np
+from sklearn.preprocessing import MinMaxScaler
+
+
+def _col_names():
+    """
+    返回列名称
+    :return: 列名称
+    """
+    return ["duration", "protocol_type", "service", "flag", "src_bytes",
+            "dst_bytes", "land", "wrong_fragment", "urgent", "hot", "num_failed_logins",
+            "logged_in", "num_compromised", "root_shell", "su_attempted", "num_root",
+            "num_file_creations", "num_shells", "num_access_files", "num_outbound_cmds",
+            "is_host_login", "is_guest_login", "count", "srv_count", "serror_rate",
+            "srv_serror_rate", "rerror_rate", "srv_rerror_rate", "same_srv_rate",
+            "diff_srv_rate", "srv_diff_host_rate", "dst_host_count", "dst_host_srv_count",
+            "dst_host_same_srv_rate", "dst_host_diff_srv_rate", "dst_host_same_src_port_rate",
+            "dst_host_srv_diff_host_rate", "dst_host_serror_rate", "dst_host_srv_serror_rate",
+            "dst_host_rerror_rate", "dst_host_srv_rerror_rate", "label"]
+
+
+def _encode_text_dummy(df, name):
+    """
+    将字符串类型的参数编码为二进制
+    :param df:
+    :param name:
+    :return:
+    """
+    dummies = pd.get_dummies(df.loc[:, name])
+    for x in dummies.columns:
+        dummy_name = "{}-{}".format(name, x)
+        df.loc[:, dummy_name] = dummies[x]
+    df.drop(name, axis=1, inplace=True)
+
+
+def _to_xy(df):
+    """
+    编码整个df,分为x,y
+    :param df:
+    :param target:
+    :return:
+    """
+    labels = ['label-dos', 'label-normal', 'label-probe', 'label-r2l', 'label-u2r']
+    dummies = df.loc[:, labels]
+    for label in labels:
+        df = df.drop(label, 1)
+    return df, dummies
+
+
+def _log_turns(df, col):
+    return df[col].apply(np.log1p)
+
+
# Feature normalization
def _rescale_features(data):
    """Scale each feature column of *data* into [0, 1] with a freshly
    fitted MinMaxScaler and return the transformed array."""
    return MinMaxScaler().fit_transform(data)
+
+
+def _get_attack_map():
+    attack_map = [x.strip().split(',') for x in open('Data/AttackTypes.csv', 'r')]
+    attack_map = {k + '.': v for (k, v) in attack_map}
+    return attack_map
+
+
def _divide_xy(x, y):
    """Split features/labels into per-category subsets and min-max scale each.

    Each subset — and the full set — is rescaled with its own independently
    fitted scaler (matching the original behaviour; note this means the
    subsets are not on a common scale with ``'all'``).

    :param x: feature DataFrame
    :param y: one-hot label DataFrame with ``label-*`` columns
    :return: dict mapping ``'all'``/``'normal'``/``'u2r'``/``'r2l'``/
        ``'dos'``/``'probe'`` to ``[features, labels]`` float32 arrays
    """
    def _subset(category):
        # Rows whose one-hot column for this category is set.
        mask = y['label-' + category] == 1
        x_sub = x[mask].values.astype(np.float32)
        y_sub = y[mask].values.astype(np.float32)
        return [_rescale_features(x_sub), y_sub]

    # Full set first, then one entry per category (was 5x copy-pasted code).
    result = {'all': [_rescale_features(x.values.astype(np.float32)),
                      y.values.astype(np.float32)]}
    for category in ('normal', 'u2r', 'r2l', 'dos', 'probe'):
        result[category] = _subset(category)
    return result
+
+
+# x_train, y_train, x_train_normal, y_train_normal, x_train_u2r, y_train_u2r, x_train_r2l, y_train_r2l, x_train_dos, y_train_dos, x_train_probe, y_train_probe
+
def get_data(argument):
    """Load the KDD Cup 10% data, encode it, and return per-category arrays.

    :param argument: ``'train'`` for the sampled half of the data; any
        other value returns the complementary test half
    :return: dict from :func:`_divide_xy` mapping category names to
        ``[features, labels]`` arrays
    """
    df = pd.read_csv("Data/kddcup.data_10_percent_corrected",
                     header=None, names=_col_names())
    # Map raw attack names (e.g. 'smurf.') onto the five coarse categories.
    df['label'] = df['label'].replace(_get_attack_map())
    # One-hot encode every categorical / binary text column.
    for categorical in ['protocol_type', 'service', 'flag', 'land',
                        'logged_in', 'is_host_login', 'is_guest_login']:
        _encode_text_dummy(df, categorical)
    # Compress heavy-tailed counters with log(1 + v).
    for skewed in ['duration', 'src_bytes', 'dst_bytes']:
        df[skewed] = _log_turns(df, skewed)
    # One-hot encode the label column itself.
    _encode_text_dummy(df, 'label')

    # Deterministic 50/50 train/test split.
    df_train = df.sample(frac=0.5, random_state=42)
    df_test = df.loc[~df.index.isin(df_train.index)]
    x_train, y_train = _to_xy(df_train)
    x_test, y_test = _to_xy(df_test)
    if argument == 'train':
        return _divide_xy(x_train, y_train)
    return _divide_xy(x_test, y_test)
+
+# def get_dataset(split='all'):
+#     dataset = _get_data()
+#     if split == 'all':
+#         return dataset['x_train'], dataset['y_train'], dataset['x_train_normal'], dataset['y_train_normal'], dataset[
+#             'x_train_abnormal'], dataset['y_train_abnormal'], dataset['x_test'], dataset['y_test'], dataset[
+#                    'x_test_normal'], dataset['y_test_normal'], dataset['x_test_abnormal'], dataset['y_test_abnormal'],
+#     else:
+#         key_img = 'x_' + split
+#         key_lbl = 'y_' + split
+#         return dataset[key_img], dataset[key_lbl]

二進制
Data/__pycache__/Preprocess.cpython-36.pyc


+ 7 - 0
GenerateAbnormalData.py

@@ -0,0 +1,7 @@
+import numpy as np
+
+
def generate_abnormal_data(generator, batch_size, latent_dim=32):
    """Sample *batch_size* synthetic records from a trained generator.

    Draws standard-normal latent vectors of size *latent_dim* and maps them
    through ``generator.predict``.

    :param generator: trained model exposing ``predict``
    :param batch_size: number of synthetic records to produce
    :param latent_dim: dimensionality of the latent noise vectors
    :return: array of generated records
    """
    latent_noise = np.random.normal(0, 1, (batch_size, latent_dim))
    return generator.predict(latent_noise)

+ 36 - 0
IDS/CNN/CNNetwork.py

@@ -0,0 +1,36 @@
+import numpy as np
+from tensorflow.keras.callbacks import EarlyStopping
+from tensorflow.keras.models import Sequential
+from tensorflow.keras.layers import Dense, Conv1D, MaxPooling1D, Flatten
+
+
def build_cnn_network():
    """Build and compile the 1-D CNN classifier (121 features -> 5 classes).

    Input shape is ``(121, 1)`` — one channel per feature; output is a
    5-way softmax over the attack categories.
    """
    model = Sequential([
        Conv1D(32, kernel_size=5, strides=1, activation='relu',
               input_shape=(121, 1)),
        MaxPooling1D(pool_size=2, strides=2),
        Conv1D(64, 5, activation='relu'),
        MaxPooling1D(pool_size=2),
        Flatten(),
        Dense(1000, activation='relu'),
        Dense(5, activation='softmax'),
    ])
    # Categorical cross-entropy: multi-class log loss over one-hot labels.
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
+
+
def run_cnn_network(train, target, epochs):
    """Shuffle the data, train a fresh CNN with early stopping, return it.

    :param train: 2-D feature array (samples x 121)
    :param target: one-hot labels (samples x 5)
    :param epochs: maximum number of training epochs
    :return: the trained Keras model
    """
    order = np.random.permutation(len(train))
    # Conv1D expects a trailing channel dimension.
    shuffled_x = np.expand_dims(train[order], axis=2)
    shuffled_y = target[order]
    model = build_cnn_network()
    stopper = EarlyStopping(monitor='val_loss', min_delta=0, patience=10,
                            verbose=0, mode='auto')
    model.fit(x=shuffled_x, y=shuffled_y, epochs=epochs, validation_split=0.1,
              batch_size=128, callbacks=[stopper])
    return model
+
+
def evaluate_cnn_network(cnn, test, test_target, flag):
    """Evaluate *cnn* on a test set and return ``(loss, accuracy)``.

    :param cnn: trained Keras model
    :param test: 2-D feature array
    :param test_target: one-hot labels
    :param flag: subset name for reporting (currently unused here)
    """
    # Add the channel dimension expected by Conv1D.
    features = np.expand_dims(test, axis=2)
    loss, accuracy = cnn.evaluate(features, test_target,
                                  batch_size=None, verbose=1)
    return loss, accuracy

二進制
IDS/CNN/__pycache__/CNNetwork.cpython-36.pyc


+ 21 - 0
Network/BiGAN/Discriminator.py

@@ -0,0 +1,21 @@
+from tensorflow.keras import Input, Model
+from tensorflow.keras.layers import concatenate, Dense, LeakyReLU, Dropout
+
+
def build_discriminator(latent_dim):
    """BiGAN discriminator scoring (latent, data) pairs as real/fake.

    Concatenates a latent vector with a 121-feature record and outputs a
    sigmoid validity score.

    :param latent_dim: dimensionality of the latent input
    :return: Keras Model taking ``[z, data]`` and returning validity
    """
    latent_in = Input(shape=(latent_dim,))
    data_in = Input(shape=(121,))
    hidden = concatenate([latent_in, data_in])
    # Three identical Dense -> LeakyReLU -> Dropout stages.
    for _ in range(3):
        hidden = Dense(128, kernel_initializer='glorot_uniform')(hidden)
        hidden = LeakyReLU(alpha=0.1)(hidden)
        hidden = Dropout(0.2)(hidden)
    validity = Dense(1, activation="sigmoid")(hidden)
    return Model([latent_in, data_in], validity)

+ 18 - 0
Network/BiGAN/Encoder.py

@@ -0,0 +1,18 @@
+from tensorflow.keras import Sequential, Input, Model
+from tensorflow.keras.layers import Dense, LeakyReLU, BatchNormalization
+
+
def build_encoder(latent_dim):
    """BiGAN encoder mapping a 121-feature record to a latent vector.

    :param latent_dim: dimensionality of the latent output
    :return: Keras Model taking a record and returning its latent code
    """
    net = Sequential([
        Dense(64, input_dim=121, kernel_initializer='glorot_uniform'),
        LeakyReLU(alpha=0.1),
        BatchNormalization(momentum=0.8),
        Dense(latent_dim, kernel_initializer='glorot_uniform'),
    ])
    net.summary()

    real = Input(shape=(121,))
    return Model(real, net(real))

+ 17 - 0
Network/BiGAN/Generator.py

@@ -0,0 +1,17 @@
+from tensorflow.keras import Input, Model, Sequential
+from tensorflow.keras.layers import LeakyReLU, BatchNormalization, Dense
+
+
def build_generator(latent_dim):
    """BiGAN generator mapping a latent vector to a fake 121-feature record.

    :param latent_dim: dimensionality of the latent input
    :return: Keras Model taking noise and returning a generated record
    """
    net = Sequential([
        Dense(64, input_dim=latent_dim, kernel_initializer='glorot_uniform'),
        LeakyReLU(alpha=0.1),
        BatchNormalization(momentum=0.8),
        Dense(128, kernel_initializer='glorot_uniform'),
        LeakyReLU(alpha=0.1),
        BatchNormalization(momentum=0.8),
        Dense(121, kernel_initializer='glorot_uniform'),
    ])
    net.summary()
    noise = Input(shape=(latent_dim,))
    return Model(noise, net(noise))

+ 60 - 0
Network/BiGAN/RunBiGanNetwork.py

@@ -0,0 +1,60 @@
+import numpy as np
+from tensorflow.keras import Input, Model
+from tensorflow.keras.optimizers import Adam
+
+from Network.BiGAN.Discriminator import build_discriminator
+from Network.BiGAN.Encoder import build_encoder
+from Network.BiGAN.Generator import build_generator
+
+
def init_network(latent_dim):
    """Build and wire the BiGAN: generator, discriminator, encoder, combined.

    The discriminator is compiled first and only then frozen
    (``trainable = False``), so it still trains via its own
    ``train_on_batch`` while the combined ``bigan_generator`` updates only
    the generator and encoder.

    :param latent_dim: size of the latent space
    :return: ``(generator, discriminator, encoder, bigan_generator)``
    """
    discriminator = build_discriminator(latent_dim)
    shared_optimizer = Adam(0.00001, 0.5)
    discriminator.compile(loss=['binary_crossentropy'],
                          optimizer=shared_optimizer,
                          metrics=['accuracy'])

    generator = build_generator(latent_dim)
    encoder = build_encoder(latent_dim)

    # Freeze the discriminator inside the combined model.
    discriminator.trainable = False

    latent_sample = Input(shape=(latent_dim,))
    generated = generator(latent_sample)

    real_record = Input(shape=(121,))
    encoded = encoder(real_record)

    score_generated = discriminator([latent_sample, generated])
    score_real = discriminator([encoded, real_record])

    bigan_generator = Model([latent_sample, real_record],
                            [score_generated, score_real])
    bigan_generator.compile(loss=['binary_crossentropy', 'binary_crossentropy'],
                            optimizer=shared_optimizer)
    return generator, discriminator, encoder, bigan_generator
+
+
def run_bigan(train_x, train_y, epochs, batch_size=50, latent_dim=32):
    """Adversarially train a BiGAN on *train_x* and return the generator.

    :param train_x: real feature rows to learn from
    :param train_y: accepted for interface symmetry; not used here
    :param epochs: number of adversarial update steps
    :param batch_size: samples per step
    :param latent_dim: latent-space dimensionality
    :return: the trained generator model
    """
    generator, discriminator, encoder, bigan_generator = init_network(latent_dim)
    real_labels = np.ones((batch_size, 1))
    fake_labels = np.zeros((batch_size, 1))
    for epoch in range(epochs):
        # --- discriminator step ---
        z = np.random.normal(0, 1, (batch_size, latent_dim))
        data_ = generator.predict(z)

        batch_idx = np.random.randint(0, train_x.shape[0], batch_size)
        data = train_x[batch_idx]
        z_ = encoder.predict(data)

        d_loss_real = discriminator.train_on_batch([z_, data], real_labels)
        d_loss_fake = discriminator.train_on_batch([z, data_], fake_labels)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

        # --- generator/encoder step (labels flipped to fool the frozen
        #     discriminator) ---
        g_loss = bigan_generator.train_on_batch([z, data],
                                                [real_labels, fake_labels])
        print("%d [D loss: %f, acc: %.2f%%] [G loss: %f]"
              % (epoch, d_loss[0], 100 * d_loss[1], g_loss[0]))
    return generator

二進制
Network/BiGAN/__pycache__/Discriminator.cpython-36.pyc


二進制
Network/BiGAN/__pycache__/Encoder.cpython-36.pyc


二進制
Network/BiGAN/__pycache__/Generator.cpython-36.pyc


二進制
Network/BiGAN/__pycache__/RunBiGanNetwork.cpython-36.pyc


+ 39 - 0
Network/ExponentialMovingAverage.py

@@ -0,0 +1,39 @@
+from keras import backend as K
+
+
class ExponentialMovingAverage:
    """Maintain an exponential moving average (EMA) of a model's weights.

    Usage: construct after ``model.compile`` and before the first training
    step, then call :meth:`inject`.
    """

    def __init__(self, model, momentum=0.9999):
        # EMA decay factor; close to 1.0 means the average moves slowly.
        self.momentum = momentum
        self.model = model
        # One zero-initialized shadow variable per model weight.
        self.ema_weights = [K.zeros(K.shape(w)) for w in model.weights]

    def inject(self):
        """Register the EMA update ops so they run during training.

        NOTE(review): each ``moving_average_update`` op is registered via
        ``add_metric`` so Keras evaluates it every batch — confirm this
        works on the targeted Keras version, since ``add_metric`` normally
        expects a metric tensor rather than an update op.
        """
        self.initialize()
        for w1, w2 in zip(self.ema_weights, self.model.weights):
            op = K.moving_average_update(w1, w2, self.momentum)
            self.model.add_metric(op, name='exponential_moving_average')

    def initialize(self):
        """Initialize the shadow weights from the model's current weights."""
        self.old_weights = K.batch_get_value(self.model.weights)
        K.batch_set_value(zip(self.ema_weights, self.old_weights))

    def apply_ema_weights(self):
        """Back up current weights, then load the EMA weights into the model."""
        self.old_weights = K.batch_get_value(self.model.weights)
        ema_weights = K.batch_get_value(self.ema_weights)
        K.batch_set_value(zip(self.model.weights, ema_weights))

    def reset_old_weights(self):
        """Restore the model to the weights saved before
        :meth:`apply_ema_weights`."""
        K.batch_set_value(zip(self.model.weights, self.old_weights))

+ 21 - 0
Network/GAN/Discriminator.py

@@ -0,0 +1,21 @@
+from tensorflow.keras import Sequential, Input, Model
+from tensorflow.keras.layers import Dense, LeakyReLU, Dropout
+
+
def build_discriminator():
    """GAN discriminator: 121-feature record -> sigmoid real/fake score.

    :return: Keras Model mapping a record to a validity probability
    """
    net = Sequential([
        Dense(256, input_dim=121, kernel_initializer='glorot_uniform'),
        LeakyReLU(alpha=0.1),
        Dropout(0.2),
        Dense(128, kernel_initializer='glorot_uniform'),
        LeakyReLU(alpha=0.1),
        Dropout(0.2),
        Dense(128, kernel_initializer='glorot_uniform'),
        LeakyReLU(alpha=0.1),
        Dropout(0.2),
        Dense(1, kernel_initializer='glorot_uniform', activation='sigmoid'),
    ])
    net.summary()

    record = Input(shape=(121,))
    return Model(record, net(record))

+ 18 - 0
Network/GAN/Generator.py

@@ -0,0 +1,18 @@
+from tensorflow.keras import Sequential, Input, Model
+from tensorflow.keras.layers import Dense, LeakyReLU, Dropout, BatchNormalization
+
+
def build_generator(latent_dim):
    """GAN generator mapping a latent vector to a fake 121-feature record.

    :param latent_dim: dimensionality of the latent input
    :return: Keras Model taking noise and returning a generated record
    """
    net = Sequential([
        Dense(64, input_dim=latent_dim, kernel_initializer='glorot_uniform'),
        LeakyReLU(alpha=0.1),
        BatchNormalization(momentum=0.8),
        Dense(128, kernel_initializer='glorot_uniform'),
        LeakyReLU(alpha=0.1),
        BatchNormalization(momentum=0.8),
        Dense(121, kernel_initializer='glorot_uniform'),
    ])
    net.summary()

    noise = Input(shape=(latent_dim,))
    return Model(noise, net(noise))

+ 59 - 0
Network/GAN/RunNetwork.py

@@ -0,0 +1,59 @@
+import numpy as np
+from tensorflow.keras import Input, Model
+from tensorflow.keras.optimizers import Adam
+
+from Network.ExponentialMovingAverage import ExponentialMovingAverage
+from Network.GAN.Discriminator import build_discriminator
+from Network.GAN.Generator import build_generator
+
+
def init_network(latent_dim):
    """Build the GAN: generator, compiled discriminator, combined model.

    The discriminator is compiled trainable (for its own updates), then
    frozen before being wired into ``combined`` so the generator step only
    updates generator weights.

    :param latent_dim: size of the generator's latent input
    :return: ``(generator, discriminator, combined)``
    """
    generator = build_generator(latent_dim)
    discriminator = build_discriminator()
    # Separate Adam instances for discriminator and generator updates.
    # (The original also created a third optimizer that was never used.)
    optimizer_d = Adam(0.00001, 0.9)
    optimizer_g = Adam(0.00001, 0.9)
    discriminator.compile(loss='binary_crossentropy',
                          optimizer=optimizer_d,
                          metrics=['accuracy']
                          )
    z = Input(shape=(latent_dim,))
    fake = generator(z)

    # Freeze the discriminator inside the combined model only.
    discriminator.trainable = False
    validity = discriminator(fake)
    combined = Model(z, validity)
    combined.compile(loss='binary_crossentropy',
                     optimizer=optimizer_g,
                     )
    return generator, discriminator, combined
+
+
def run_network(train_x, train_y, epochs, batch_size=50, latent_dim=32):
    """Adversarially train a vanilla GAN on *train_x*; return the generator.

    :param train_x: real feature rows to learn from
    :param train_y: accepted for interface symmetry; not used here
    :param epochs: number of adversarial update steps
    :param batch_size: samples per step
    :param latent_dim: latent-space dimensionality
    :return: the trained generator model
    """
    generator, discriminator, combined = init_network(latent_dim)
    real_labels = np.ones((batch_size, 1))
    fake_labels = np.zeros((batch_size, 1))
    for epoch in range(epochs):
        # --- discriminator step: one real batch, one generated batch ---
        batch_idx = np.random.randint(0, train_x.shape[0], batch_size)
        real = train_x[batch_idx]
        noise = np.random.normal(0, 1, (batch_size, latent_dim))
        fake = generator.predict(noise)
        d_loss_real = discriminator.train_on_batch(real, real_labels)
        d_loss_fake = discriminator.train_on_batch(fake, fake_labels)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
        # --- generator step: fresh noise labelled as real ---
        noise = np.random.normal(0, 1, (batch_size, latent_dim))
        g_loss = combined.train_on_batch(noise, real_labels)
        # Progress report.
        print("%d [Discriminator loss: %f, accuracy: %.2f%%] [Generator loss: %f]" % (
            epoch, d_loss[0], 100 * d_loss[1], g_loss))
    return generator

二進制
Network/GAN/__pycache__/Discriminator.cpython-36.pyc


二進制
Network/GAN/__pycache__/Generator.cpython-36.pyc


二進制
Network/GAN/__pycache__/RunNetwork.cpython-36.pyc


二進制
Network/__pycache__/ExponentialMovingAverage.cpython-36.pyc


+ 1 - 0
README.md

@@ -0,0 +1 @@
+# Ids-Based-On-GAN-CNN

+ 105 - 0
main.py

@@ -0,0 +1,105 @@
+import numpy as np
+from tensorflow.keras.models import load_model
+
+from Data.Preprocess import get_data
+from GenerateAbnormalData import generate_abnormal_data
+from IDS.CNN.CNNetwork import run_cnn_network, evaluate_cnn_network
+from Network.BiGAN.RunBiGanNetwork import run_bigan
+from Network.GAN.RunNetwork import run_network
+
+
def get_ids_train_data():
    """Interactively assemble the IDS training set, optionally augmented
    with GAN-generated samples.

    Repeatedly prompts for a generator model name ('go' stops the loop);
    each loaded generator produces the requested number of fake records,
    appended to the training data with the chosen one-hot category label.

    :return: ``(train_data, train_label)`` numpy arrays
    """
    data = get_data('train')
    _train_data = data['all'][0]
    _train_label = data['all'][1]
    labels = ['label-dos', 'label-normal', 'label-probe', 'label-r2l', 'label-u2r']
    for label in ['normal', 'u2r', 'r2l', 'probe', 'dos']:
        print('[-]' + label + '数据量为:' + str(data[label][0].shape[0]))
    while True:
        model_name = input('[-]请输入生成器模型名称(输入go开始训练IDS):')
        if model_name == 'go':
            break
        label_name = input('[-]请输入生成器数据类型(dos,normal,probe,r2l,u2r):')
        try:
            generator = load_model('Models/Gan/' + model_name + '.h5')
        except Exception:
            print('[-]没有此模型,名字输错啦')
            # Bug fix: the original fell through after a failed load and
            # then used an undefined (or stale) ``generator`` -> NameError.
            continue
        num = int(input('[-]请输入生成数据量:'))
        fake_abnormal_data_x = generate_abnormal_data(generator, num)
        _train_data = np.append(_train_data, fake_abnormal_data_x, axis=0)

        # Build one-hot label rows matching the chosen category.
        fake_abnormal_data_y = np.zeros((fake_abnormal_data_x.shape[0], 5))
        for i in range(5):
            if labels[i].endswith(label_name):
                fake_abnormal_data_y[::, i] = 1
        _train_label = np.append(_train_label, fake_abnormal_data_y, axis=0)

    return _train_data, _train_label
+
+
def train_gan():
    """Interactively train a GAN or BiGAN on one data category and save
    the resulting generator under ``Models/Gan/``."""
    type_of_gan = input('[-]请输入神经网络模型类型( ① GAN ② BIGAN ):')
    model_name = input('[-]请输入模型名称:')
    data_class = input('[-]请输入需要生成的数据:')
    data = get_data('train')[data_class]
    epochs = int(input('[-]请输入训练次数:'))
    choice = int(type_of_gan)
    if choice == 1:
        trained = run_network(data[0], data[1], epochs)
        trained.save('Models/Gan/' + 'gan_' + model_name + '_' + data_class + '.h5')
    elif choice == 2:
        trained = run_bigan(data[0], data[1], epochs)
        trained.save('Models/Gan/' + 'bigan_' + model_name + '_' + data_class + '.h5')
+
+
def train_ids_without_gan():
    """Train the CNN IDS on the raw (non-augmented) training set and save it."""
    model_name = input('[-]请输入模型名称:')
    features, labels = get_data('train')['all']
    epochs = int(input('[-]请输入训练次数:'))
    trained = run_cnn_network(features, labels, epochs)
    trained.save('Models/IDS/' + model_name + '.h5')
+
+
def train_ids_with_gan():
    """Train the CNN IDS on GAN-augmented training data and save it."""
    features, labels = get_ids_train_data()
    model_name = input('[-]请输入IDS模型名称:')
    epochs = int(input('[-]请输入训练次数:'))
    run_cnn_network(features, labels, epochs).save('Models/IDS/' + model_name + '.h5')
+
+
def load_and_test_ids():
    """Load a saved IDS model and interactively evaluate it on test subsets.

    Entering a name that is not a subset key raises KeyError internally and
    ends the loop — that is the intended exit mechanism.
    """
    model_name = input('[-]请输入IDS模型名称:')
    cnn = load_model('Models/IDS/' + model_name + '.h5')
    data = get_data('test')
    losses, accuracies, flags = [], [], []
    while True:
        opt_flag = input('[-]请输入需要测试的样本集(all,normal,u2r,r2l,probe):')
        flags.append(opt_flag)
        try:
            loss, accuracy = evaluate_cnn_network(cnn, data[opt_flag][0], data[opt_flag][1], 'KDD' + opt_flag)
        except KeyError:
            break
        losses.append(loss)
        accuracies.append(accuracy)
        # Re-print the full result history after every run.
        for i in range(len(losses)):
            print('[-]( 测试集: KDD-' + flags[i] + ' )  测试损失为:' + str(losses[i]) + '  准确度为:' + str(accuracies[i]))
+
+
def main():
    """Entry point: dispatch between GAN training and the IDS operations."""
    flag = input('[-]请输入操作( ① GAN模型 ② IDS模型 ):')
    if int(flag) == 1:
        train_gan()
    elif int(flag) == 2:
        flag_for_ids = input('[-] ① 单独训练IDS模型 ② 使用GAN生成数据并训练IDS模型 ③ 加载并测试IDS模型 :')
        # Dispatch table instead of an if/elif chain.
        ids_actions = {1: train_ids_without_gan,
                       2: train_ids_with_gan,
                       3: load_and_test_ids}
        action = ids_actions.get(int(flag_for_ids))
        if action is not None:
            action()
+
+
+if __name__ == '__main__':
+    main()