## 导言

### 求解思路

#### 建立模型

flowchart TB
Input--n*2-->Normalization
Normalization--n*2-->GAT1.1
Normalization--n*2-->GAT1.2
Normalization--n*2-->GAT1.m
subgraph GAT1
GAT1.1
GAT1.2
GAT1.m
end
GAT1.1--n*2-->elu
GAT1.2--n*2-->elu
GAT1.m--n*2-->elu
elu--m*n*2-->GAT2
GAT2--n*n-->softmax
softmax--n*n-->output



## 实验过程

### 实验环境

• Intel(R) Core(TM) i7-6567U CPU @ 3.30GHz 3.31GHz
• 8.00GB RAM
• Python 3.8.2 64-bit
• jupyter==1.0.0
• numpy==1.18.4
• torch==1.5.0+cpu
• torch-scatter==2.0.4
• torch-sparse==0.6.4
• torch-cluster==1.5.4
• torch-geometric==1.5.0
• matplotlib==3.2.1

### 导入相关包并构建数据集

import io
import math
import random
import copy
from matplotlib import pyplot
import torch
from torch import nn
from torch_geometric.data import InMemoryDataset
from torch_geometric.data import Data

def creat_dataset(num_data=10000, coord_range=100, num_nodes=10):
    """Build ``num_data`` TSP graph samples over one fixed random node layout.

    Each sample is a random connected graph (a random Hamiltonian cycle plus
    extra random edges added with probability 0.5) whose optimal tour is
    solved exactly with Held-Karp bitmask DP; directed edges lying on the
    optimal tour are labelled 1, the rest 0.

    NOTE(review): several lines of the original were lost in extraction
    (the distance-matrix head, the DP loop headers, the edge_info outer
    loop, the tour-closing minimisation). Those sections are reconstructed
    from the surviving fragments -- verify against the original notebook.

    Args:
        num_data: number of Data samples to generate.
        coord_range: node coordinates are drawn from [0, coord_range)^2.
        num_nodes: nodes per graph (Held-Karp is O(2^n * n^2), keep small).

    Returns:
        list of torch_geometric ``Data`` objects with fields x, y,
        edge_index, edge_attr and pos.
    """
    data_list = []
    random.seed(0)
    # One fixed node layout shared by every generated sample.
    coords = random.sample(
        [[i, j] for i in range(coord_range) for j in range(coord_range)],
        k=num_nodes)
    flag = 1  # plot/print only the very first sample as a sanity check
    for _i in range(num_data):
        # A random permutation yields a Hamiltonian cycle, so a tour exists.
        permutation = random.sample(range(num_nodes), k=num_nodes)
        edge_index = [[permutation[i - 1], permutation[i]]
                      for i in range(len(permutation))]
        # Add each remaining undirected edge with probability 0.5.
        for i in range(num_nodes):
            for j in range(i):
                if random.random() < 0.5 and [i, j] not in edge_index:
                    edge_index.append([i, j])

        # Distance matrix: Euclidean length for present edges, a large
        # penalty (2*coord_range*num_nodes) for absent ones, which keeps the
        # DP well-defined while making missing edges never optimal.
        dist = [[math.hypot(coords[i][0] - coords[j][0],
                            coords[i][1] - coords[j][1])
                 if [i, j] in edge_index or [j, i] in edge_index
                 else 2.0 * coord_range * num_nodes
                 for j in range(num_nodes)] for i in range(num_nodes)]

        # Held-Karp: dp[s][v] = [cost, path] of the cheapest path starting
        # at node 0, visiting exactly the bitmask set s, ending at node v.
        full = 1 << num_nodes
        dp = [[None for _ in range(num_nodes)] for _ in range(full)]
        dp[1][0] = [0, [0]]
        for s in range(2, full):
            for v in range(num_nodes):
                if ((s >> v) & 1) and v != 0:
                    for u in range(num_nodes):
                        if u != v and ((s >> u) & 1):
                            prev = dp[s ^ (1 << v)][u]
                            if prev is None:
                                continue
                            cost = prev[0] + dist[u][v]
                            if dp[s][v] is None or cost < dp[s][v][0]:
                                dp[s][v] = copy.deepcopy(dp[s ^ (1 << v)][u])
                                dp[s][v][0] = cost
                                dp[s][v][1].append(v)

        # Close the cycle back to node 0 and keep the cheapest full tour.
        ans = None
        for v in range(1, num_nodes):
            if dp[full - 1][v] is not None:
                cost = dp[full - 1][v][0] + dist[v][0]
                if ans is None or cost < ans[0]:
                    ans = [cost, dp[full - 1][v][1]]
        # Consecutive path nodes (wrapping via index -1) form the tour edges.
        ans_tour = [[ans[1][i - 1], ans[1][i]] for i in range(len(ans[1]))]

        # Label every directed edge of the graph: attr = 1 iff on the tour.
        edge_info = []
        for i in range(num_nodes):
            for j in range(i):
                if [i, j] in edge_index or [j, i] in edge_index:
                    attr = 1 if [i, j] in ans_tour or [j, i] in ans_tour else 0
                    edge_info.append([i, j, attr])
                    edge_info.append([j, i, attr])
        if flag:
            # Visualise the first sample's optimal tour once.
            flag = 0
            x = []
            y = []
            for ed in ans_tour:
                x.append(coords[ed[0]][0])
                y.append(coords[ed[0]][1])
            x.append(x[0])
            y.append(y[0])
            pyplot.plot(x, y)
            pyplot.show()
            print(ans_tour)
            print(edge_info)
        pos = coords

        # y is an adjacency-style 0/1 target matrix of tour membership.
        y = [[0 for j in range(len(pos))] for i in range(len(pos))]
        edge_index = []
        edge_attr = []
        num_edges = len(edge_info)
        for i in range(num_edges):
            edge_index.append([edge_info[i][0], edge_info[i][1]])
            edge_attr.append(edge_info[i][2])
            if edge_attr[-1]:
                y[edge_index[-1][0]][edge_index[-1][1]] = 1

        x = torch.tensor(pos, dtype=torch.float)
        y = torch.tensor(y, dtype=torch.float)
        edge_index = torch.tensor(edge_index, dtype=torch.long).t().contiguous()
        edge_attr = torch.tensor(edge_attr, dtype=torch.float)
        pos = torch.tensor(pos, dtype=torch.float)
        data_list.append(Data(x=x, y=y, edge_index=edge_index,
                              edge_attr=edge_attr, pos=pos))
        print(_i, end='\r')
    return data_list

torch.manual_seed(2020)  # seed for reproducible numbers
dataset = creat_dataset()
# 80/20 train/test split of the 10000 generated graphs.
train_dataset = dataset[:8000]
test_dataset = dataset[8000:]

# Fixed: the original passed placeholder-free f-strings plus comma
# arguments to print(); use real f-string interpolation (same output).
print(f"Number of Train Graphs : {len(train_dataset)}")
print(f"Number of Test Graphs : {len(test_dataset)}")
print(f"Number of Nodes per Graph: {train_dataset[0].num_nodes}")
print(f"Number of Node Features: {train_dataset[0].num_node_features}")


[[3, 0], [0, 6], [6, 8], [8, 4], [4, 5], [5, 9], [9, 1], [1, 7], [7, 2], [2, 3]]
[[1, 0, 0], [0, 1, 0], [2, 0, 0], [0, 2, 0], [3, 0, 1], [0, 3, 1], [3, 2, 1], [2, 3, 1], [4, 0, 0], [0, 4, 0], [4, 2, 0], [2, 4, 0], [5, 0, 0], [0, 5, 0], [5, 2, 0], [2, 5, 0], [5, 4, 1], [4, 5, 1], [6, 0, 1], [0, 6, 1], [7, 1, 1], [1, 7, 1], [7, 2, 1], [2, 7, 1], [7, 4, 0], [4, 7, 0], [7, 6, 0], [6, 7, 0], [8, 1, 0], [1, 8, 0], [8, 3, 0], [3, 8, 0], [8, 4, 1], [4, 8, 1], [8, 6, 1], [6, 8, 1], [8, 7, 0], [7, 8, 0], [9, 0, 0], [0, 9, 0], [9, 1, 1], [1, 9, 1], [9, 2, 0], [2, 9, 0], [9, 3, 0], [3, 9, 0], [9, 5, 1], [5, 9, 1], [9, 8, 0], [8, 9, 0]]
Number of Train Graphs : 8000
Number of Test Graphs : 2000
Number of Nodes per Graph: 10
Number of Node Features: 2


### 模型

import torch
from torch import nn
from torch_geometric.nn import GATConv

class GAT(nn.Module):
# Two-layer graph attention network: BatchNorm over the 2-D node
# coordinates, GATConv -> ELU -> GATConv, then a row-wise softmax so each
# node emits a probability distribution over candidate neighbours
# (cf. the flowchart in the introduction).
def __init__(self):
super(GAT, self).__init__()
self.hid = 10
# NOTE(review): forward() uses self.conv1 and self.conv2, but they are
# never created here -- the GATConv constructions (per the flowchart: m
# attention heads feeding a second GAT layer with n outputs) appear to
# have been lost in extraction. Restore from the original notebook.

self.norm1 = nn.BatchNorm1d(dataset[0].num_node_features)

def forward(self, data):
# Only node features and graph connectivity are consumed here.
x, edge_index = data.x, data.edge_index

# Dropout before the GAT layer is used to avoid overfitting in small datasets like Cora.
# One can skip them if the dataset is sufficiently large.

x = self.norm1(x)
#x = nn.functional.dropout(x, p=0.6, training=self.training)
x = self.conv1(x, edge_index)
x = nn.functional.elu(x)
#x = nn.functional.dropout(x, p=0.6, training=self.training)
x = self.conv2(x, edge_index)

# Softmax over dim=1: each node's scores over target nodes sum to 1.
return nn.functional.softmax(x, dim=1)


### 训练模型并评估模型效果

from torch_geometric.data import DataLoader
# Train on GPU when available, otherwise CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# NOTE(review): `optimizer` and `data` are used below but never defined in
# this excerpt -- the optimizer construction (e.g. torch.optim.Adam), the
# optimizer.zero_grad() call, and the batch/sample selection appear to have
# been lost in extraction. DataLoader is imported but unused, suggesting a
# `for data in DataLoader(...)` loop was dropped. Restore from the
# original notebook before running.
model = GAT().to(device)
acc_list=[]
model.train()
for epoch in range(100):
model.train()
out = model(data)
# Binary cross-entropy between the predicted soft adjacency (softmax
# rows) and the 0/1 tour-membership matrix data.y.
loss = nn.functional.binary_cross_entropy(out, data.y)
loss.backward()
optimizer.step()
if epoch%1 == 0:
# Evaluation: keep each node's two highest-scoring neighbours and count
# the node as correct if either of them is a true tour edge.
model.eval()
tot=0
acc=0
out = model(data)
out = out.detach().numpy().tolist()
for i in range(len(out)):
for j in range(len(out[i])):
out[i][j]=[out[i][j],j]
out[i].sort(reverse=True)
while(len(out[i])>2):
out[i].pop()
y=data.y.numpy().tolist()
for i in range(len(out)):
tot+=1
if y[i][out[i][0][1]] or y[i][out[i][1][1]]:
acc+=1
acc_list.append(acc/tot)
print(str(epoch)+'\t'+str(acc_list[-1]),end='\r')
if epoch % 10 == 9:
# Checkpoint the whole model every 10 epochs.
torch.save(model, './model.'+str(epoch)+'.pkl')
print(max(acc_list))
pyplot.plot(acc_list)
pyplot.show()

0.80629665