From be1322381ed255652652e1fbefdb94030f94a4e8 Mon Sep 17 00:00:00 2001
From: Koha9
Date: Mon, 24 Jul 2023 17:49:45 +0900
Subject: [PATCH] Optimize AIMemory runtime efficiency
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Applies to version 2.9.

Eliminates the following warning:

UserWarning: Creating a tensor from a list of numpy.ndarrays is
extremely slow. Please consider converting the list to a single
numpy.ndarray with numpy.array() before converting to a tensor.
---
 Aimbot-PPO-Python/Pytorch/aimemory.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/Aimbot-PPO-Python/Pytorch/aimemory.py b/Aimbot-PPO-Python/Pytorch/aimemory.py
index 89aad78..8aa3ee3 100644
--- a/Aimbot-PPO-Python/Pytorch/aimemory.py
+++ b/Aimbot-PPO-Python/Pytorch/aimemory.py
@@ -107,12 +107,12 @@ class PPOMem:
                     next_done=torch.Tensor([next_done[i]]).to(self.device),
                 )
                 # send memories to training datasets
-                self.obs[roundTargetType] = torch.cat((self.obs[roundTargetType], torch.tensor(self.ob_bf[i]).to(self.device)), 0)
-                self.actions[roundTargetType] = torch.cat((self.actions[roundTargetType], torch.tensor(self.act_bf[i]).to(self.device)), 0)
-                self.dis_logprobs[roundTargetType] = torch.cat((self.dis_logprobs[roundTargetType], torch.tensor(self.dis_logprobs_bf[i]).to(self.device)), 0)
-                self.con_logprobs[roundTargetType] = torch.cat((self.con_logprobs[roundTargetType], torch.tensor(self.con_logprobs_bf[i]).to(self.device)), 0)
+                self.obs[roundTargetType] = torch.cat((self.obs[roundTargetType], torch.tensor(np.array(self.ob_bf[i])).to(self.device)), 0)
+                self.actions[roundTargetType] = torch.cat((self.actions[roundTargetType], torch.tensor(np.array(self.act_bf[i])).to(self.device)), 0)
+                self.dis_logprobs[roundTargetType] = torch.cat((self.dis_logprobs[roundTargetType], torch.tensor(np.array(self.dis_logprobs_bf[i])).to(self.device)), 0)
+                self.con_logprobs[roundTargetType] = torch.cat((self.con_logprobs[roundTargetType], torch.tensor(np.array(self.con_logprobs_bf[i])).to(self.device)), 0)
                 self.rewards[roundTargetType] = torch.cat((self.rewards[roundTargetType], thisRewardsTensor), 0)
-                self.values[roundTargetType] = torch.cat((self.values[roundTargetType], torch.tensor(self.values_bf[i]).to(self.device)), 0)
+                self.values[roundTargetType] = torch.cat((self.values[roundTargetType], torch.tensor(np.array(self.values_bf[i])).to(self.device)), 0)
                 self.advantages[roundTargetType] = torch.cat((self.advantages[roundTargetType], adv), 0)
                 self.returns[roundTargetType] = torch.cat((self.returns[roundTargetType], rt), 0)
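
Note: for context, below is a minimal standalone sketch of the pattern this
patch applies. The buffer contents here are illustrative and not taken from
aimemory.py; only the np.array()-before-torch.tensor() conversion is the
technique used in the diff above.

import numpy as np
import torch

# A replay-style buffer: a Python list of same-shape numpy arrays,
# like the ob_bf / act_bf / values_bf buffers in PPOMem.
buffer = [np.random.rand(32).astype(np.float32) for _ in range(1000)]

# Slow path: torch.tensor() on a list of ndarrays copies element by
# element and emits the UserWarning quoted in the commit message.
# t = torch.tensor(buffer)

# Fast path, as used in the patch: collapse the list into one
# contiguous ndarray first, so torch ingests it in a single copy.
t = torch.tensor(np.array(buffer))
print(t.shape)  # torch.Size([1000, 32])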