BCEWithLogitsLoss in PyTorch
BCEWithLogitsLoss() takes 0D or higher-dimensional input and target tensors of zero or more elements and returns the 0D or higher-dimensional tensor of zero or more float values computed by Sigmoid followed by BCE (Binary Cross Entropy) Loss, as shown below:
import torch
from torch import nn

tensor1 = torch.tensor([8., -3., 0., 1., 5., -2.])
tensor2 = torch.tensor([-3., 7., 4., -2., -9., 6.])
# Per-element loss: -w*(p*y*log(1/(1+exp(-x))) + (1-y)*log(1-1/(1+exp(-x))))
# where w is weight (default 1) and p is pos_weight (default 1).
# 1st element: -1*(1*(-3)*log(1/(1+exp(-8))) + (1-(-3))*log(1-1/(1+exp(-8)))) = 32.0003
# 32.0003 + 21.0486 + 0.6931 + 3.3133 + 50.0067 + 12.1269 = 119.1890
# 119.1890 / 6 = 19.8648

bcelogits = nn.BCEWithLogitsLoss()
bcelogits(input=tensor1, target=tensor2)
# tensor(19.8648)

bcelogits
# BCEWithLogitsLoss()

print(bcelogits.weight)
# None

bcelogits.reduction
# 'mean'

# Explicit defaults give the same result.
bcelogits = nn.BCEWithLogitsLoss(weight=None, reduction='mean', pos_weight=None)
bcelogits(input=tensor1, target=tensor2)
# tensor(19.8648)

bcelogits = nn.BCEWithLogitsLoss(reduction='sum')
bcelogits(input=tensor1, target=tensor2)
# tensor(119.1890)

bcelogits = nn.BCEWithLogitsLoss(reduction='none')
bcelogits(input=tensor1, target=tensor2)
# tensor([32.0003, 21.0486, 0.6931, 3.3133, 50.0067, 12.1269])

# weight scales each element's whole loss.
bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor([0., 1., 2., 3., 4., 5.]))
bcelogits(input=tensor1, target=tensor2)
# tensor(48.8394)

# pos_weight scales only the target (positive) term of each element's loss.
bcelogits = nn.BCEWithLogitsLoss(
    pos_weight=torch.tensor([0., 1., 2., 3., 4., 5.])
)
bcelogits(input=tensor1, target=tensor2)
# tensor(28.5957)

bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor(0.))
bcelogits(input=tensor1, target=tensor2)
# tensor(0.)

bcelogits = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(0.))
bcelogits(input=tensor1, target=tensor2)
# tensor(13.8338)

# int and bool tensors are also accepted for weight and pos_weight.
bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor([0, 1, 2, 3, 4, 5]))
bcelogits(input=tensor1, target=tensor2)
# tensor(48.8394)

bcelogits = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([0, 1, 2, 3, 4, 5]))
bcelogits(input=tensor1, target=tensor2)
# tensor(28.5957)

bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor(0))
bcelogits(input=tensor1, target=tensor2)
# tensor(0.)

bcelogits = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(0))
bcelogits(input=tensor1, target=tensor2)
# tensor(13.8338)

bcelogits = nn.BCEWithLogitsLoss(
    weight=torch.tensor([True, False, True, False, True, False])
)
bcelogits(input=tensor1, target=tensor2)
# tensor(13.7834)

bcelogits = nn.BCEWithLogitsLoss(weight=torch.tensor([False]))
bcelogits(input=tensor1, target=tensor2)
# tensor(0.)

# 2D input and target tensors give the same result.
tensor1 = torch.tensor([[8., -3., 0.], [1., 5., -2.]])
tensor2 = torch.tensor([[-3., 7., 4.], [-2., -9., 6.]])
bcelogits = nn.BCEWithLogitsLoss()
bcelogits(input=tensor1, target=tensor2)
# tensor(19.8648)

# 3D input and target tensors give the same result.
tensor1 = torch.tensor([[[8.], [-3.], [0.]], [[1.], [5.], [-2.]]])
tensor2 = torch.tensor([[[-3.], [7.], [4.]], [[-2.], [-9.], [6.]]])
bcelogits = nn.BCEWithLogitsLoss()
bcelogits(input=tensor1, target=tensor2)
# tensor(19.8648)

# Empty tensors: 'mean' gives nan, 'sum' gives 0.
tensor1 = torch.tensor([])
tensor2 = torch.tensor([])
bcelogits = nn.BCEWithLogitsLoss(reduction='mean')
bcelogits(input=tensor1, target=tensor2)
# tensor(nan)

bcelogits = nn.BCEWithLogitsLoss(reduction='sum')
bcelogits(input=tensor1, target=tensor2)
# tensor(0.)
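For reference, the per-element formula in the comments above can be checked by hand. The sketch below uses a hypothetical helper named manual_bce_with_logits() (not part of PyTorch), assumes the same tensor1 and tensor2 as above, and reproduces the weight and pos_weight results:

import torch
from torch import nn
from torch.nn import functional as F

def manual_bce_with_logits(x, y, weight=1., pos_weight=1.):
    # -w*(p*y*log(sigmoid(x)) + (1-y)*log(1-sigmoid(x)))
    # logsigmoid(x) = log(1/(1+exp(-x))), logsigmoid(-x) = log(1-1/(1+exp(-x)))
    return -weight * (pos_weight * y * F.logsigmoid(x) + (1. - y) * F.logsigmoid(-x))

tensor1 = torch.tensor([8., -3., 0., 1., 5., -2.])
tensor2 = torch.tensor([-3., 7., 4., -2., -9., 6.])

manual_bce_with_logits(tensor1, tensor2).mean()
# tensor(19.8648), same as nn.BCEWithLogitsLoss()

w = torch.tensor([0., 1., 2., 3., 4., 5.])

manual_bce_with_logits(tensor1, tensor2, weight=w).mean()
# tensor(48.8394), same as nn.BCEWithLogitsLoss(weight=w)

manual_bce_with_logits(tensor1, tensor2, pos_weight=w).mean()
# tensor(28.5957), same as nn.BCEWithLogitsLoss(pos_weight=w)

This also makes the difference between the two parameters visible: weight multiplies each element's entire loss, while pos_weight multiplies only the y (positive) term.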
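As stated above, BCEWithLogitsLoss() combines Sigmoid and BCE Loss in a single class. A minimal sketch of that equivalence, using hypothetical logits and 0/1 labels so the comparison stays in the usual binary-label setting:

import torch
from torch import nn

logits = torch.tensor([8., -3., 0., 1., 5., -2.])  # hypothetical raw scores, no Sigmoid applied
labels = torch.tensor([1., 0., 1., 0., 1., 0.])    # hypothetical binary targets

fused = nn.BCEWithLogitsLoss()(input=logits, target=labels)

probs = nn.Sigmoid()(logits)                       # explicit Sigmoid first
separate = nn.BCELoss()(input=probs, target=labels)

print(fused, separate)
# both print the same value (up to floating-point error)

The fused version is generally preferable because, per the PyTorch documentation, combining the two operations into one layer takes advantage of the log-sum-exp trick for numerical stability.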