Can someone please tell me the difference between the following two notations? One works and the other doesn't, yet they seem identical to me.
def GradientDescentCostTest():
    """Compute the linear-regression cost J(theta) two equivalent ways.

    For the fixed toy dataset below, returns
        J(theta) = sum((features . theta - values)^2) / (2 * m)
    where m = len(values). With theta = [0, 0, 0] this is
    (20^2 + 10^2 + 14^2) / 6 = 116.0.

    Returns:
        float: the cost computed by the second formulation (both now agree).
    """
    # Plain `float` replaces the removed alias `numpy.float` (deprecated in
    # NumPy 1.20, removed in 1.24); it was always just the builtin float.
    theta = numpy.array([0, 0, 0], float)
    features = numpy.array([[80, 20, 0], [65, 30, 1], [70, 23, 1]], float)
    values = numpy.array([20, 10, 14], float)

    # Formulation 1: divide the summed squared error by 2m. This worked
    # because sumOfSquareErrors is already a float, so the division is true
    # division even on Python 2.
    sumOfSquareErrors = numpy.square(numpy.dot(features, theta) - values).sum()
    cost = sumOfSquareErrors / (len(values) * 2)

    # Formulation 2: multiply by 1/(2m). The original wrote
    # 1/(len(values)*2): on Python 2 both operands are ints, so `/` is
    # INTEGER division and 1/6 == 0, collapsing the whole product to 0.0.
    # A float literal (1.0) forces true division on Python 2 and 3 alike.
    cost = 1.0 / (len(values) * 2) * numpy.square(
        numpy.dot(features, theta) - values).sum()
    return cost
# Function-call form of print: prints the same single value on Python 2
# (parenthesized expression) and Python 3 (print function), unlike the
# original `print x` statement, which is a SyntaxError on Python 3.
print(GradientDescentCostTest())
Thanks