import theano
import theano.tensor as T

k = T.iscalar("k")
A = T.vector("A")

# Symbolic description of the result
result, updates = theano.scan(fn=lambda prior_result, A: prior_result * A,
                              outputs_info=T.ones_like(A),
                              non_sequences=A,
                              n_steps=k)
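# Argument order: scan passes the step function the slices of `sequences`
# first (none here), then the previous output supplied via `outputs_info`,
# then the `non_sequences`. So the lambda is called as fn(prior_result, A),
# with prior_result starting at ones_like(A); step i therefore yields A**(i+1).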

# We only care about A**k, but scan has provided us with A**1 through A**k.
# Discard the values that we don't care about. Scan is smart enough to
# notice this and not waste memory saving them.
final_result = result[-1]

# compiled function that returns A**k
power = theano.function(inputs=[A, k], outputs=final_result, updates=updates)

print(power(range(10), 2))
print(power(range(10), 4))
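
# Illustrative plain-Python cross-check: the elementwise fourth powers of
# 0..9, which should match the array printed by the last call above.
print([float(i) ** 4 for i in range(10)])
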
print('P2:')
import numpy

coefficients = theano.tensor.vector("coefficients")
x = T.scalar("x")

max_coefficients_supported = 10000

# Build the polynomial by accumulation: each scan step adds the term
# coefficient * x**power to the running total.
components, updates = theano.scan(fn=lambda coefficient, power, prior_result, free_variable:
                                      prior_result + (coefficient * (free_variable ** power)),
                                  outputs_info=T.zeros(1),
                                  sequences=[coefficients, theano.tensor.arange(max_coefficients_supported)],
                                  non_sequences=x)
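# The step function again receives sequences, then the recurrent output, then
# non-sequences: `coefficient` and `power` come from the two sequences,
# `prior_result` from outputs_info (initially T.zeros(1)), and `free_variable`
# is the non-sequence x. Scan stops at the shortest sequence, so only
# len(coefficients) steps run despite the 10000-element arange.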
# Note: `components` already holds running partial sums
# (c0, c0 + c1*x, c0 + c1*x + c2*x**2, ...), so components.sum() adds the
# partial sums together rather than forming the polynomial; the actual
# polynomial value is the last partial sum.
polynomial = components.sum()

pol = components[-1]

# Compile a function that returns every partial sum
calculate_polynomial = theano.function(inputs=[coefficients, x], outputs=components)

# Test
test_coefficients = numpy.asarray([1, 0, 2], dtype=numpy.float32)
test_value = 3
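# The next print shows the running partial sums, one row per coefficient;
# its last entry should equal the hand-computed value
# 1 * 3**0 + 0 * 3**1 + 2 * 3**2 = 19 printed after it.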
print(calculate_polynomial(test_coefficients, test_value))
print(1.0 * (3 ** 0) + 0.0 * (3 ** 1) + 2.0 * (3 ** 2))

print('P3:')
import numpy as np
import theano
import theano.tensor as T

up_to = T.iscalar("up_to")

# define a named function, rather than using lambda
def accumulate_by_adding(arange_val, prior_result):
    return prior_result + arange_val

seq = T.arange(up_to)

# An unauthorized implicit downcast from the dtype of 'seq', to that of
# 'T.as_tensor_variable(0)' which is of dtype 'int8' by default would occur
# if this instruction were to be used instead of the next one:
# outputs_info = T.as_tensor_variable(0)

outputs_info = T.as_tensor_variable(np.asarray(0, seq.dtype))
scan_result, scan_updates = theano.scan(fn=accumulate_by_adding,
                                        outputs_info=outputs_info,
                                        sequences=seq)
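# Because each step adds the current element of `seq` to the running total,
# scan_result holds the cumulative sums 0, 1, 3, 6, ... of arange(up_to),
# i.e. the triangular numbers checked below.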
triangular_sequence = theano.function(inputs=[up_to], outputs=scan_result)

# test
some_num = 15
print(triangular_sequence(some_num))
print([n * (n + 1) // 2 for n in range(some_num)])

print('P4:')
location = T.imatrix("location")
values = T.vector("values")
output_model = T.matrix("output_model")

def set_value_at_position(a_location, a_value, output_model):
    zeros = T.zeros_like(output_model)
    zeros_subtensor = zeros[a_location[0], a_location[1]]
    return T.set_subtensor(zeros_subtensor, a_value)

result, updates = theano.scan(fn=set_value_at_position,
                              outputs_info=None,
                              sequences=[location, values],
                              non_sequences=output_model)
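# With outputs_info=None there is no recurrence, so scan acts as a simple map
# over the sequences: each (location, value) pair produces its own
# zeros_like(output_model) with a single entry set, and `result` stacks those
# matrices (shape (2, 5, 5) for the test below) rather than writing all the
# values into one matrix.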

assign_values_at_positions = theano.function(inputs=[location, values, output_model], outputs=result)

# test
test_locations = numpy.asarray([[1, 1], [2, 3]], dtype=numpy.int32)
test_values = numpy.asarray([42, 50], dtype=numpy.float32)
test_output_model = numpy.zeros((5, 5), dtype=numpy.float32)
print(assign_values_at_positions(test_locations, test_values, test_output_model))