Numerically stable softmax
This commit is contained in:
parent
6a0cdbea3a
commit
b9147c269c
@ -154,9 +154,9 @@ class CrossEntropySoftmaxError(object):
|
||||
Returns:
|
||||
Scalar error function value.
|
||||
"""
|
||||
probs = np.exp(outputs - outputs.max(-1)[:, None])
|
||||
probs /= probs.sum(-1)[:, None]
|
||||
return -np.mean(np.sum(targets * np.log(probs), axis=1))
|
||||
normOutputs = outputs - outputs.max(-1)[:, None]
|
||||
logProb = normOutputs - np.log(np.sum(np.exp(normOutputs), -1)[:, None])
|
||||
return -np.mean(np.sum(targets * logProb, axis=1))
|
||||
|
||||
def grad(self, outputs, targets):
|
||||
"""Calculates gradient of error function with respect to outputs.
|
||||
|
Loading…
Reference in New Issue
Block a user