
Completed; waiting for the GAN part to be integrated

Shellmiao 3 years ago
parent
commit
bc64702c04

+ 1 - 1
.idea/IdsUsingCNN.iml

@@ -2,7 +2,7 @@
 <module type="PYTHON_MODULE" version="4">
   <component name="NewModuleRootManager">
     <content url="file://$MODULE_DIR$" />
-    <orderEntry type="inheritedJdk" />
+    <orderEntry type="jdk" jdkName="Python 3.6 (pythonkeras) (2)" jdkType="Python SDK" />
     <orderEntry type="sourceFolder" forTests="false" />
   </component>
 </module>

+ 1 - 1
.idea/misc.xml

@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
-  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8 (base)" project-jdk-type="Python SDK" />
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.6 (pythonkeras) (2)" project-jdk-type="Python SDK" />
 </project>

BIN
CNNetwork.h5


+ 34 - 0
CNNetwork/CNNetwork.py

@@ -0,0 +1,34 @@
+import numpy as np
+from keras.callbacks import EarlyStopping
+from keras.models import Sequential
+from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten
+from sklearn.metrics import confusion_matrix
+
+
+def build_cnn_network():
+    model = Sequential()
+    model.add(Conv1D(32, kernel_size=5, strides=1,
+                     activation='relu',
+                     input_shape=(122, 1)))
+    model.add(MaxPooling1D(pool_size=2, strides=2))
+    model.add(Conv1D(64, 5, activation='relu'))
+    model.add(MaxPooling1D(pool_size=2))
+    model.add(Flatten())
+    model.add(Dense(1000, activation='relu'))
+    model.add(Dense(5, activation='softmax'))
+    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
+    return model
+
+
+def run_cnn_network(train, target, epochs):
+    cnn = build_cnn_network()
+    train = np.expand_dims(train, axis=2)
+    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=0, mode='auto')
+    cnn.fit(x=train, y=target, epochs=epochs, validation_split=0.1, batch_size=128, callbacks=[early_stopping])
+    return cnn
+
+
+def evaluate_cnn_network(cnn, test, test_target, flag):
+    loss, accuracy = cnn.evaluate(np.expand_dims(test, axis=2), test_target, batch_size=None, verbose=1)
+    # print('[-]( Test set: ' + flag + ' )  loss: ' + str(loss) + '  accuracy: ' + str(accuracy))
+    return loss, accuracy
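
The two entry points above expect the 122-column feature matrix produced by Data/Preprocess.py (shown further down) and add the trailing channel dimension themselves. A minimal training sketch, not part of this commit; the epoch count and output file name are only illustrative:

from Data.Preprocess import get_data
from CNNetwork.CNNetwork import run_cnn_network, evaluate_cnn_network

# Preprocessed NSL-KDD splits: 122 scaled / one-hot features, 5 attack categories
train, target, test, test_target, *_ = get_data()

# Train with early stopping on the 10% validation split, then check the full test set
cnn = run_cnn_network(train, target, epochs=100)   # epoch count is illustrative
loss, accuracy = evaluate_cnn_network(cnn, test, test_target, 'KDDTest')
cnn.save('CNNetwork.h5')                           # file name is illustrative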

BIN
CNNetwork_epochs_1000.h5


+ 41 - 0
Data/AttackTypes.csv

@@ -0,0 +1,41 @@
+apache2,dos
+back,dos
+buffer_overflow,u2r
+ftp_write,r2l
+guess_passwd,r2l
+httptunnel,u2r
+imap,r2l
+ipsweep,probe
+land,dos
+loadmodule,u2r
+mailbomb,dos
+mscan,probe
+multihop,r2l
+named,r2l
+neptune,dos
+nmap,probe
+perl,u2r
+phf,r2l
+pod,dos
+portsweep,probe
+processtable,dos
+ps,u2r
+rootkit,u2r
+saint,probe
+satan,probe
+sendmail,r2l
+smurf,dos
+snmpguess,u2r
+snmpgetattack,r2l
+spy,r2l
+sqlattack,u2r
+teardrop,dos
+udpstorm,dos
+warezclient,r2l
+warezmaster,r2l
+worm,u2r
+xlock,r2l
+xsnoop,r2l
+xterm,u2r
+normal,normal
+unknown,unknown

File diff suppressed because it is too large
+ 0 - 0
Data/KDDTest+.csv


File diff suppressed because it is too large
+ 0 - 0
Data/KDDTrain+.csv


+ 109 - 0
Data/Preprocess.py

@@ -0,0 +1,109 @@
+import pandas as pd
+import numpy as np
+from sklearn.preprocessing import MinMaxScaler
+
+
+def get_cols():
+    with open('Data/kddcup.names', 'r') as infile:
+        kdd_names = infile.readlines()
+    kdd_cols = [x.split(':')[0] for x in kdd_names[1:]]
+    kdd_cols += ['class', 'difficulty']
+    return kdd_cols
+
+
+def get_attack_map():
+    attack_map = [x.strip().split(',') for x in open('Data/AttackTypes.csv', 'r')]
+    attack_map = {k: v for (k, v) in attack_map}
+    return attack_map
+
+
+def cat_encode(df, col):
+    return pd.concat([df.drop(col, axis=1), pd.get_dummies(df[col].values)], axis=1)
+
+
+def log_trns(df, col):
+    return df[col].apply(np.log1p)
+
+
+# Scale each feature to the [0, 1] range
+def rescale_features(data):
+    min_max_scaler = MinMaxScaler()
+    data = min_max_scaler.fit_transform(data)
+    return data
+
+
+def get_normal_and_abnormal_data(kdd_t):
+    normal_kdd_t = kdd_t[kdd_t['class'].isin(['normal'])].copy()      # .copy() avoids pandas SettingWithCopyWarning on pop() below
+    abnormal_kdd_t = kdd_t[~kdd_t['class'].isin(['normal'])].copy()
+    test_target = kdd_t['class']
+    test_target = pd.get_dummies(test_target)
+    normal_kdd_t.pop('difficulty')
+    abnormal_kdd_t.pop('difficulty')
+    normal_test_target = normal_kdd_t.pop('class')
+    abnormal_test_target = abnormal_kdd_t.pop('class')
+    normal_test_target = pd.get_dummies(normal_test_target)
+    abnormal_test_target = pd.get_dummies(abnormal_test_target)
+    for col in test_target.columns:
+        if col not in normal_test_target.columns:
+            normal_test_target[col] = 0
+    normal_test_target = normal_test_target[test_target.columns]
+    for col in test_target.columns:
+        if col not in abnormal_test_target.columns:
+            abnormal_test_target[col] = 0
+    abnormal_test_target = abnormal_test_target[test_target.columns]
+    normal_test = normal_kdd_t.values
+    normal_test_target = normal_test_target.values
+    normal_test = rescale_features(normal_test)
+    abnormal_test = abnormal_kdd_t.values
+    abnormal_test_target = abnormal_test_target.values
+    abnormal_test = rescale_features(abnormal_test)
+    return normal_test, normal_test_target, abnormal_test, abnormal_test_target
+
+
+def get_data():
+    kdd_cols = get_cols()
+    kdd = pd.read_csv('Data/KDDTrain+.csv', names=kdd_cols)
+    kdd_t = pd.read_csv('Data/KDDTest+.csv', names=kdd_cols)
+    kdd_cols = [kdd.columns[0]] + sorted(list(set(kdd.protocol_type.values))) + sorted(
+        list(set(kdd.service.values))) + sorted(list(set(kdd.flag.values))) + kdd.columns[4:].tolist()
+    attack_map = get_attack_map()
+    kdd['class'] = kdd['class'].replace(attack_map)
+    kdd_t['class'] = kdd_t['class'].replace(attack_map)
+    cat_lst = ['protocol_type', 'service', 'flag']
+    for col in cat_lst:
+        kdd = cat_encode(kdd, col)
+        kdd_t = cat_encode(kdd_t, col)
+    log_lst = ['duration', 'src_bytes', 'dst_bytes']
+    for col in log_lst:
+        kdd[col] = log_trns(kdd, col)
+        kdd_t[col] = log_trns(kdd_t, col)
+    kdd = kdd[kdd_cols]
+    # After one-hot encoding, kdd_t may be missing category columns present in kdd; add them as zeros
+    for col in kdd_cols:
+        if col not in kdd_t.columns:
+            kdd_t[col] = 0
+    kdd_t = kdd_t[kdd_cols]
+
+    normal_test, normal_test_target, abnormal_test, abnormal_test_target = get_normal_and_abnormal_data(kdd_t)
+
+    kdd.pop('difficulty')
+    target = kdd.pop('class')
+    kdd_t.pop('difficulty')
+    test_target = kdd_t.pop('class')
+
+    target = pd.get_dummies(target)
+    test_target = pd.get_dummies(test_target)
+
+    # for idx, col in enumerate(list(test_target.columns)):
+    #     print(idx, col)
+
+    train = kdd.values
+    test = kdd_t.values
+
+    target = target.values
+    test_target = test_target.values
+
+    train = rescale_features(train)
+    test = rescale_features(test)
+
+    return train, target, test, test_target, normal_test, normal_test_target, abnormal_test, abnormal_test_target
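
The column-alignment loops above exist because pd.get_dummies only creates columns for the categories it actually sees, so the test split can end up with fewer label columns than the training split. A toy illustration of the same alignment step (the data values are made up):

import pandas as pd

train_labels = pd.get_dummies(pd.Series(['normal', 'dos', 'probe', 'r2l', 'u2r']))
test_labels = pd.get_dummies(pd.Series(['normal', 'dos']))   # fewer classes observed

# Add the class columns missing from the test encoding, then match the training column order
for col in train_labels.columns:
    if col not in test_labels.columns:
        test_labels[col] = 0
test_labels = test_labels[train_labels.columns]

print(test_labels.columns.tolist())   # ['dos', 'normal', 'probe', 'r2l', 'u2r']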

+ 42 - 0
Data/kddcup.names

@@ -0,0 +1,42 @@
+back,buffer_overflow,ftp_write,guess_passwd,imap,ipsweep,land,loadmodule,multihop,neptune,nmap,normal,perl,phf,pod,portsweep,rootkit,satan,smurf,spy,teardrop,warezclient,warezmaster.
+duration: continuous.
+protocol_type: symbolic.
+service: symbolic.
+flag: symbolic.
+src_bytes: continuous.
+dst_bytes: continuous.
+land: symbolic.
+wrong_fragment: continuous.
+urgent: continuous.
+hot: continuous.
+num_failed_logins: continuous.
+logged_in: symbolic.
+num_compromised: continuous.
+root_shell: continuous.
+su_attempted: continuous.
+num_root: continuous.
+num_file_creations: continuous.
+num_shells: continuous.
+num_access_files: continuous.
+num_outbound_cmds: continuous.
+is_host_login: symbolic.
+is_guest_login: symbolic.
+count: continuous.
+srv_count: continuous.
+serror_rate: continuous.
+srv_serror_rate: continuous.
+rerror_rate: continuous.
+srv_rerror_rate: continuous.
+same_srv_rate: continuous.
+diff_srv_rate: continuous.
+srv_diff_host_rate: continuous.
+dst_host_count: continuous.
+dst_host_srv_count: continuous.
+dst_host_same_srv_rate: continuous.
+dst_host_diff_srv_rate: continuous.
+dst_host_same_src_port_rate: continuous.
+dst_host_srv_diff_host_rate: continuous.
+dst_host_serror_rate: continuous.
+dst_host_srv_serror_rate: continuous.
+dst_host_rerror_rate: continuous.
+dst_host_srv_rerror_rate: continuous.

+ 19 - 14
main.py

@@ -1,16 +1,21 @@
-# This is a sample Python script.
+from CNNetwork.CNNetwork import run_cnn_network, evaluate_cnn_network
+from Data.Preprocess import get_data
+from keras.models import load_model
 
-# Press Shift+F10 to execute it or replace it with your code.
-# Double-press Shift to search everywhere for classes, files, tool windows, actions, and settings.
-
-
-def print_hi(name):
-    # Use a breakpoint in the code line below to debug your script.
-    print(f'Hi, {name}')  # Press Ctrl+F8 to toggle the breakpoint.
-
-
-# Press the green button in the gutter to run the script.
 if __name__ == '__main__':
-    print_hi('PyCharm')
-
-# Visit https://www.jetbrains.com/help/pycharm/ for PyCharm help.
+    flag = input('[-]Choose an operation ( ① load a model  ② train a model ): ')
+    model_name = input('[-]Enter the model name: ')
+    train, target, test, test_target, normal_test, normal_test_target, abnormal_test, abnormal_test_target = get_data()
+    if int(flag) == 2:
+        epochs = int(input('[-]Enter the number of training epochs: '))
+        cnn = run_cnn_network(train, target, epochs)
+        cnn.save(model_name + '.h5')
+    else:
+        cnn = load_model(model_name + '.h5')
+        loss, accuracy = evaluate_cnn_network(cnn, test, test_target, 'KDDTest')
+        normal_loss, normal_accuracy = evaluate_cnn_network(cnn, normal_test, normal_test_target, 'KDDTest_normal')
+        abnormal_loss, abnormal_accuracy = evaluate_cnn_network(cnn, abnormal_test, abnormal_test_target,
+                                                                'KDDTest_abnormal')
+        print('[-]( Test set: KDDTest )  loss: ' + str(loss) + '  accuracy: ' + str(accuracy))
+        print('[-]( Test set: KDDTest_normal )  loss: ' + str(normal_loss) + '  accuracy: ' + str(normal_accuracy))
+        print('[-]( Test set: KDDTest_abnormal )  loss: ' + str(abnormal_loss) + '  accuracy: ' + str(abnormal_accuracy))
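
For reference, the model-loading branch above can also be driven non-interactively; a sketch assuming one of the checked-in .h5 files and the same evaluation helpers:

from keras.models import load_model
from Data.Preprocess import get_data
from CNNetwork.CNNetwork import evaluate_cnn_network

(train, target, test, test_target,
 normal_test, normal_test_target, abnormal_test, abnormal_test_target) = get_data()

cnn = load_model('CNNetwork_epochs_1000.h5')   # one of the .h5 files added in this commit
for name, x, y in [('KDDTest', test, test_target),
                   ('KDDTest_normal', normal_test, normal_test_target),
                   ('KDDTest_abnormal', abnormal_test, abnormal_test_target)]:
    loss, accuracy = evaluate_cnn_network(cnn, x, y, name)
    print('[-]( Test set: ' + name + ' )  loss: ' + str(loss) + '  accuracy: ' + str(accuracy))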

Some files were not shown because too many files have changed in this diff