*Memo:
As shown below, BCEWithLogitsLoss() can take a 0D or higher-dimensional tensor of zero or more elements (the logits) and a target tensor, and return a 0D or higher-dimensional tensor of zero or more values (floats) computed by Sigmoid followed by BCE Loss.
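Before the full demo, here is a minimal sketch (not part of the original demo) of the per-element formula that the comments below refer to: each element's loss is -weight * (pos_weight * y * log(sigmoid(x)) + (1 - y) * log(1 - sigmoid(x))). It uses plain torch.sigmoid and torch.log, whereas BCEWithLogitsLoss itself uses a numerically stabler log-sum-exp formulation, so the last digits can differ slightly.

import torch

# Hand computation of the 1st element of the demo below (x = 8, y = -3),
# with the default weight = 1 and pos_weight = 1.
x = torch.tensor(8.)
y = torch.tensor(-3.)
loss = -(y * torch.log(torch.sigmoid(x)) + (1. - y) * torch.log(1. - torch.sigmoid(x)))
print(loss)  # close to tensor(32.0003), the 1st value of reduction='none' below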
import torch
from torch import nn

tensor1 = torch.tensor([8., -3., 0., 1., 5., -2.])
tensor2 = torch.tensor([-3., 7., 4., -2., -9., 6.])
# Per-element formula: -w*(p*y*log(1/(1+exp(-x))) + (1-y)*log(1-1/(1+exp(-x))))
# 1st element: -1*(1*(-3)*log(1/(1+exp(-8))) + (1-(-3))*log(1-1/(1+exp(-8)))) = 32.0003
# 32.0003 + 21.0486 + 0.6931 + 3.3133 + 50.0067 + 12.1269 = 119.1890
# 119.1890 / 6 = 19.8648

bcelogits = nn.BCEWithLogitsLoss()
bcelogits(input=tensor1, target=tensor2)
# tensor(19.8648)

bcelogits
# BCEWithLogitsLoss()

print(bcelogits.weight)
# None

bcelogits.reduction
# 'mean'

# Explicit defaults give the same result.
bcelogits = nn.BCEWithLogitsLoss(weight=None, reduction='mean', pos_weight=None)
bcelogits(input=tensor1, target=tensor2)
# tensor(19.8648)

bcelogits = nn.BCEWithLogitsLoss(reduction='sum')
bcelogits(input=tensor1, target=tensor2)
# tensor(119.1890)

bcelogits = nn.BCEWithLogitsLoss(reduction='none')
bcelogits(input=tensor1, target=tensor2)
# tensor([32.0003, 21.0486, 0.6931, 3.3133, 50.0067, 12.1269])

# `weight` rescales each element's loss.
bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor([0., 1., 2., 3., 4., 5.]))
bcelogits(input=tensor1, target=tensor2)
# tensor(48.8394)

# `pos_weight` rescales only the positive (y*log(sigmoid(x))) term.
bcelogits = nn.BCEWithLogitsLoss(
    pos_weight=torch.tensor([0., 1., 2., 3., 4., 5.])
)
bcelogits(input=tensor1, target=tensor2)
# tensor(28.5957)

bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor(0.))
bcelogits(input=tensor1, target=tensor2)
# tensor(0.)

bcelogits = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(0.))
bcelogits(input=tensor1, target=tensor2)
# tensor(13.8338)

# Integer and bool tensors are also accepted for `weight` and `pos_weight`.
bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor([0, 1, 2, 3, 4, 5]))
bcelogits(input=tensor1, target=tensor2)
# tensor(48.8394)

bcelogits = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([0, 1, 2, 3, 4, 5]))
bcelogits(input=tensor1, target=tensor2)
# tensor(28.5957)

bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor(0))
bcelogits(input=tensor1, target=tensor2)
# tensor(0.)

bcelogits = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(0))
bcelogits(input=tensor1, target=tensor2)
# tensor(13.8338)

bcelogits = nn.BCEWithLogitsLoss(
    weight=torch.tensor([True, False, True, False, True, False])
)
bcelogits(input=tensor1, target=tensor2)
# tensor(13.7834)

bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor([False]))
bcelogits(input=tensor1, target=tensor2)
# tensor(0.)

# 2D input and target.
tensor1 = torch.tensor([[8., -3., 0.], [1., 5., -2.]])
tensor2 = torch.tensor([[-3., 7., 4.], [-2., -9., 6.]])
bcelogits = nn.BCEWithLogitsLoss()
bcelogits(input=tensor1, target=tensor2)
# tensor(19.8648)

# 3D input and target.
tensor1 = torch.tensor([[[8.], [-3.], [0.]], [[1.], [5.], [-2.]]])
tensor2 = torch.tensor([[[-3.], [7.], [4.]], [[-2.], [-9.], [6.]]])
bcelogits = nn.BCEWithLogitsLoss()
bcelogits(input=tensor1, target=tensor2)
# tensor(19.8648)

# Empty tensors: 'mean' reduction gives nan, 'sum' gives 0.
tensor1 = torch.tensor([])
tensor2 = torch.tensor([])
bcelogits = nn.BCEWithLogitsLoss(reduction='mean')
bcelogits(input=tensor1, target=tensor2)
# tensor(nan)

bcelogits = nn.BCEWithLogitsLoss(reduction='sum')
bcelogits(input=tensor1, target=tensor2)
# tensor(0.)
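To see how weight and pos_weight enter the reduced result, the minimal sketch below (a check against the values shown above, not part of the original demo) rebuilds two of them: weight multiplies each element's loss, and reduction='mean' still divides by the element count rather than by the sum of the weights, while pos_weight multiplies only the y * log(sigmoid(x)) term.

import torch
from torch import nn

x = torch.tensor([8., -3., 0., 1., 5., -2.])
y = torch.tensor([-3., 7., 4., -2., -9., 6.])
scale = torch.tensor([0., 1., 2., 3., 4., 5.])

# Per-element losses from the built-in module (no reduction).
per_elem = nn.BCEWithLogitsLoss(reduction='none')(input=x, target=y)

# weight: rescale each element's loss, then take the plain mean over 6 elements.
print((scale * per_elem).mean())                              # tensor(48.8394)
print(nn.BCEWithLogitsLoss(weight=scale)(input=x, target=y))  # tensor(48.8394)

# pos_weight: rescale only the positive term inside each element's loss.
pos = -(scale * y * torch.log(torch.sigmoid(x))
        + (1. - y) * torch.log(1. - torch.sigmoid(x)))
print(pos.mean())  # approximately tensor(28.5957); the built-in is numerically stabler
print(nn.BCEWithLogitsLoss(pos_weight=scale)(input=x, target=y))  # tensor(28.5957)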
This concludes the detailed look at BCEWithLogitsLoss in PyTorch.