This is my 449th original article.

1. Introduction

CNNs (convolutions) are good at capturing local patterns; LSTMs (long short-term memory networks) are good at remembering temporal causality and long-range dependencies; Transformers (self-attention) are good at comparing any two time steps in a sequence against each other to capture global correlations, and they do so in parallel. The fusion strategy used here is a serial pipeline, CNN → LSTM → Transformer: the CNN first extracts local features, the LSTM then builds up long-term state, and the Transformer finally performs global interaction. The concrete case study below fuses CNN + LSTM + Transformer for single-step time-series forecasting with multivariate input and univariate output, covering model construction, training, and prediction.

2. Implementation

2.1 Data loading

Core code:

```python
df = pd.read_csv('data.csv', parse_dates=['Date'], index_col=[0])
df = pd.DataFrame(df)
```

Result: the raw dataset contains 5203 records in total.

2.2 Train/test split

Core code:

```python
test_split = round(len(df) * 0.20)
df_for_training = df[:-test_split]
df_for_testing = df[-test_split:]
```

Training set: 4162 records; test set: 1041 records.

2.3 Normalization

Core code:

```python
scaler = MinMaxScaler(feature_range=(0, 1))
df_for_training_scaled = scaler.fit_transform(df_for_training)
df_for_testing_scaled = scaler.transform(df_for_testing)
```

2.4 Building the windowed time-series datasets

Core code:

```python
train_dataset = TimeSeriesDataset(df_for_training_scaled, seq_len=30, pred_len=1)
test_dataset = TimeSeriesDataset(df_for_testing_scaled, seq_len=30, pred_len=1)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
```

The shapes of the windowed training and test arrays are shown in the accompanying figure.

2.5 The CNN_LSTM_Transformer model

Core code:

```python
class CNN_LSTM_Transformer(nn.Module):
    def __init__(self, input_dim=5, cnn_channels=16, lstm_hidden=32,
                 transformer_dim=32, transformer_heads=4,
                 transformer_layers=1, pred_len=1):
        super().__init__()
        # CNN
        self.cnn = nn.Conv1d(in_channels=input_dim, out_channels=cnn_channels,
                             kernel_size=3, padding=1)
        self.cnn_relu = nn.ReLU()
        # LSTM
        self.lstm = nn.LSTM(input_size=cnn_channels, hidden_size=lstm_hidden,
                            batch_first=True)
        # Transformer encoder
        encoder_layer = nn.TransformerEncoderLayer(d_model=transformer_dim,
                                                   nhead=transformer_heads,
                                                   batch_first=True)
        self.transformer = nn.TransformerEncoder(encoder_layer,
                                                 num_layers=transformer_layers)
        # Projection layers
        self.proj_lstm = nn.Linear(lstm_hidden, transformer_dim)
        self.pred_len = pred_len
        self.fc_out = nn.Linear(transformer_dim, pred_len)

    def forward(self, x):
        # x: [batch, seq_len, input_dim]
        batch_size, seq_len, _ = x.shape
        # CNN expects [batch, channels, seq_len]
        cnn_out = self.cnn_relu(self.cnn(x.transpose(1, 2)))  # [B, C, T]
        cnn_out = cnn_out.transpose(1, 2)                     # [B, T, C]
        # LSTM
        lstm_out, _ = self.lstm(cnn_out)                      # [B, T, hidden]
        lstm_proj = self.proj_lstm(lstm_out)                  # [B, T, transformer_dim]
        # Transformer
        trans_out = self.transformer(lstm_proj)               # [B, T, transformer_dim]
        # Predict from the last time step
        out = self.fc_out(trans_out[:, -1, :])                # [B, pred_len]
        return out.unsqueeze(-1)                              # [B, pred_len, 1]
```

2.6 Training the model

Core code:

```python
def train_model(model, dataloader, num_epochs=50, learning_rate=1e-3, device='cpu'):
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    criterion = nn.MSELoss()
    model.train()
    loss_history = []
    for epoch in range(num_epochs):
        epoch_losses = []
        for batch_data, batch_targets in dataloader:
            batch_data = batch_data.to(device)
            batch_targets = batch_targets.to(device)
            optimizer.zero_grad()
            outputs = model(batch_data)
            loss = criterion(outputs, batch_targets)
            loss.backward()
            optimizer.step()
            epoch_losses.append(loss.item())
        avg_loss = np.mean(epoch_losses)
        loss_history.append(avg_loss)
        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {avg_loss:.4f}')
    return loss_history
```

2.7 Evaluating the model on the test set

Core code:

```python
def evaluate_model(model, dataloader, device='cpu'):
    model.eval()
    preds = []
    trues = []
    with torch.no_grad():
        for batch_data, batch_targets in dataloader:
            batch_data = batch_data.to(device)
            outputs = model(batch_data)
            preds.append(outputs.cpu().numpy())
            trues.append(batch_targets.cpu().numpy())
    preds = np.concatenate(preds, axis=0).squeeze()
    trues = np.concatenate(trues, axis=0).squeeze()
    return preds, trues
```
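The core-code excerpts above do not include the driver that instantiates the network and runs training and evaluation. The following is only a minimal sketch of how the pieces fit together, assuming GPU use when available, the default hyperparameters shown above, and that batch_size has already been set (for example to 32); none of these choices are confirmed by the excerpts.

```python
# Minimal driver sketch -- assumed glue code, not part of the original excerpts.
import torch

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# input_dim=5 follows the class default, i.e. five input features are assumed.
model = CNN_LSTM_Transformer(input_dim=5, pred_len=1).to(device)

# Train on the windowed training set built in section 2.4.
loss_history = train_model(model, train_loader, num_epochs=50,
                           learning_rate=1e-3, device=device)

# Collect predictions and ground truth on the held-out test windows;
# both arrays are still on the min-max scale applied in section 2.3.
preds_test, trues_test = evaluate_model(model, test_loader, device=device)
```

The preds_test and trues_test arrays produced here are the ones consumed by the error metrics in section 2.9.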
2.8 Visualizing the results

Core code:

```python
def visualize_results(loss_history, preds, trues):
    sns.set(font_scale=1.2)
    plt.rc('font', family=['Times New Roman', 'Simsun'], size=12)

    # Figure 1: training loss curve.
    # Shows how the loss decreases during training, i.e. how the model keeps
    # improving its fit to the data.
    plt.plot(loss_history, marker='o', color='dodgerblue', linestyle='-', linewidth=2)
    plt.title('Training Loss Curve')
    plt.xlabel('Epoch')
    plt.ylabel('MSE Loss')
    plt.tight_layout()
    plt.savefig('output_image1.png', dpi=300, format='png')
    plt.show()

    # Figure 2: true values vs. predicted values.
    # The closer the two curves, the better the model tracks the real data.
    plt.plot(trues, label='True Values', color='limegreen')
    plt.plot(preds, label='Predicted Values', color='crimson')
    plt.title('True vs. Predicted Values')
    plt.xlabel('Sample Index')
    plt.ylabel('Trend Value')
    plt.legend()
    plt.tight_layout()
    plt.savefig('output_image2.png', dpi=300, format='png')
    plt.show()
```

Figure 1: training loss curve. Figure 2: true values vs. predicted values.

2.9 Computing the error metrics

Core code:

```python
# Note: preds_test and trues_test are still on the min-max (0-1) scale from
# section 2.3; the scikit-learn metrics expect the order (y_true, y_pred).
testScore1 = math.sqrt(mean_squared_error(trues_test, preds_test))
print('Test Score: %.2f RMSE' % (testScore1))
testScore2 = mean_absolute_error(trues_test, preds_test)
print('Test Score: %.2f MAE' % (testScore2))
testScore3 = r2_score(trues_test, preds_test)
print('Test Score: %.2f R2' % (testScore3))
testScore4 = mean_absolute_percentage_error(trues_test, preds_test)
print('Test Score: %.2f MAPE' % (testScore4))
```

About the author: I published six SCI papers on data mining during my graduate studies and now do data-algorithm research at a research institute. Drawing on my own research practice, I share fundamentals and application cases in Python, machine learning, deep learning, and artificial intelligence from time to time. I am committed to publishing only original content and to understanding and learning things in the simplest way possible. Follow me and let's grow together. Readers who need the dataset and source code can follow the official account at the bottom and add the author on WeChat.
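The TimeSeriesDataset class used in section 2.4 is not included in the core-code excerpts; the full source is distributed by the author. A minimal sketch that is consistent with how the class is called there (a 30-step multivariate input window and a one-step target) might look like the following; treating the last column of the scaled array as the prediction target, and this particular windowing convention, are assumptions.

```python
import numpy as np
import torch
from torch.utils.data import Dataset


class TimeSeriesDataset(Dataset):
    """Sliding-window dataset: multivariate input window, single-column target.

    Illustrative reconstruction only -- not the author's original class.
    The prediction target is assumed to be the last column of `data`.
    """

    def __init__(self, data, seq_len=30, pred_len=1, target_col=-1):
        self.data = np.asarray(data, dtype=np.float32)
        self.seq_len = seq_len
        self.pred_len = pred_len
        self.target_col = target_col

    def __len__(self):
        # Number of complete (window, target) pairs in the array.
        return len(self.data) - self.seq_len - self.pred_len + 1

    def __getitem__(self, idx):
        # Input window: seq_len consecutive rows over all features -> [seq_len, n_features]
        x = self.data[idx: idx + self.seq_len]
        # Target: the next pred_len values of the target column -> [pred_len, 1]
        y_start = idx + self.seq_len
        y = self.data[y_start: y_start + self.pred_len, [self.target_col]]
        return torch.from_numpy(x), torch.from_numpy(y)
```

Under this sketch each target tensor has shape [pred_len, 1], which lines up with the [B, pred_len, 1] output of the model's forward pass, so the nn.MSELoss in train_model can compare them directly.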
