This article collects typical usage examples of the Python function theano.sandbox.rng_mrg.guess_n_streams. If you are wondering what guess_n_streams does, how to call it, or what real code that uses it looks like, the curated samples below should help.
Below are 9 code examples of guess_n_streams, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
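Before the examples, here is a minimal usage sketch written for this article (it is not taken from any of the projects below; the sample shape and seed are arbitrary illustrative values). guess_n_streams(size, warn=False) guesses a reasonable number of parallel random streams for samples of the given shape, and passing that value explicitly as nstreams to an MRG_RandomStreams sampler avoids the warning about the default number of streams; this is exactly the pattern used throughout the examples below.

import theano
from theano.sandbox import rng_mrg
from theano.sandbox.rng_mrg import MRG_RandomStreams

size = (500, 50)                                        # illustrative sample shape
n_streams = rng_mrg.guess_n_streams(size, warn=False)   # guessed stream count for this shape

rng = MRG_RandomStreams(234)                            # arbitrary seed
u = rng.uniform(size=size, nstreams=n_streams)          # explicit nstreams, so no warning is raised
f = theano.function([], u)
samples = f()                                           # numpy array of shape (500, 50)

The same nstreams argument works with rng.normal and rng.binomial, as the examples below show.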
Example 1: test_normal0
def test_normal0():
    steps = 50
    std = 2.
    if (config.mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE'] or
            config.mode == 'Mode' and config.linker in ['py']):
        sample_size = (25, 30)
        default_rtol = .02
    else:
        sample_size = (999, 50)
        default_rtol = .01
    sample_size_odd = (sample_size[0], sample_size[1] - 1)
    x = tensor.matrix()
    for size, const_size, var_input, input, avg, rtol, std_tol in [
            (sample_size, sample_size, [], [], -5., default_rtol, default_rtol),
            (x.shape, sample_size, [x],
             [np.zeros(sample_size, dtype=config.floatX)],
             -5., default_rtol, default_rtol),
            # test odd value
            (x.shape, sample_size_odd, [x],
             [np.zeros(sample_size_odd, dtype=config.floatX)],
             -5., default_rtol, default_rtol),
            (sample_size, sample_size, [], [],
             np.arange(np.prod(sample_size),
                       dtype='float32').reshape(sample_size),
             10. * std / np.sqrt(steps), default_rtol),
            # test empty size (scalar)
            ((), (), [], [], -5., default_rtol, 0.02),
            # test with few samples at the same time
            ((1,), (1,), [], [], -5., default_rtol, 0.02),
            ((3,), (3,), [], [], -5., default_rtol, 0.02),
    ]:
        R = MRG_RandomStreams(234)
        # Note: we specify `nstreams` to avoid a warning.
        n = R.normal(size=size, avg=avg, std=std,
                     nstreams=rng_mrg.guess_n_streams(size, warn=False))
        f = theano.function(var_input, n)
        f(*input)
        # Increase the number of steps if size implies only a few samples
        if np.prod(const_size) < 10:
            steps_ = steps * 50
        else:
            steps_ = steps
        basictest(f, steps_, const_size, target_avg=avg, target_std=std,
                  prefix='mrg ', allow_01=True, inputs=input,
                  mean_rtol=rtol, std_tol=std_tol)
        sys.stdout.flush()
        RR = theano.tensor.shared_randomstreams.RandomStreams(234)
        nn = RR.normal(size=size, avg=avg, std=std)
        ff = theano.function(var_input, nn)
        basictest(ff, steps_, const_size, target_avg=avg, target_std=std,
                  prefix='numpy ', allow_01=True, inputs=input, mean_rtol=rtol)
Contributor: EugenePY, Project: Theano, Lines of code: 59, Source: test_rng_mrg.py
Example 2: test_uniform
def test_uniform():
    # TODO: test param low, high
    # TODO: test size=None
    # TODO: test ndim!=size.ndim
    # TODO: test bad seed
    # TODO: test size=Var, with shape that change from call to call
    if (config.mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE'] or
            config.mode == 'Mode' and config.linker in ['py']):
        sample_size = (10, 100)
        steps = 50
    else:
        sample_size = (500, 50)
        steps = int(1e3)
    x = tensor.matrix()
    for size, const_size, var_input, input in [
            (sample_size, sample_size, [], []),
            (x.shape, sample_size, [x],
             [np.zeros(sample_size, dtype=config.floatX)]),
            ((x.shape[0], sample_size[1]), sample_size, [x],
             [np.zeros(sample_size, dtype=config.floatX)]),
            # test empty size (scalar)
            ((), (), [], []),
    ]:
        # TEST CPU IMPLEMENTATION
        # The python and C implementation are tested with DebugMode
        x = tensor.matrix()
        R = MRG_RandomStreams(234)
        # Note: we specify `nstreams` to avoid a warning.
        # TODO Look for all occurrences of `guess_n_streams` and `30 * 256`
        # for such situations: it would be better to instead filter the
        # warning using the warning module.
        u = R.uniform(size=size,
                      nstreams=rng_mrg.guess_n_streams(size, warn=False))
        f = theano.function(var_input, u)
        assert any([isinstance(node.op, theano.sandbox.rng_mrg.mrg_uniform)
                    for node in f.maker.fgraph.toposort()])
        f(*input)
        # Increase the number of steps if size implies only a few samples
        if np.prod(const_size) < 10:
            steps_ = steps * 100
        else:
            steps_ = steps
        basictest(f, steps_, const_size, prefix='mrg cpu', inputs=input)
        RR = theano.tensor.shared_randomstreams.RandomStreams(234)
        uu = RR.uniform(size=size)
        ff = theano.function(var_input, uu)
        # It's not our problem if numpy generates 0 or 1
        basictest(ff, steps_, const_size, prefix='numpy',
                  allow_01=True, inputs=input)
Contributor: EugenePY, Project: Theano, Lines of code: 54, Source: test_rng_mrg.py
Example 3: test_normal0
def test_normal0():
    steps = 50
    std = 2.
    if (mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE'] or
            mode == 'Mode' and config.linker in ['py']):
        sample_size = (25, 30)
        default_rtol = .02
    else:
        sample_size = (999, 50)
        default_rtol = .01
    sample_size_odd = (sample_size[0], sample_size[1] - 1)
    x = tensor.matrix()
    for size, const_size, var_input, input, avg, rtol, std_tol in [
            (sample_size, sample_size, [], [], -5., default_rtol, default_rtol),
            (x.shape, sample_size, [x],
             [numpy.zeros(sample_size, dtype=config.floatX)],
             -5., default_rtol, default_rtol),
            ((x.shape[0], sample_size[1]), sample_size, [x],
             [numpy.zeros(sample_size, dtype=config.floatX)],
             -5., default_rtol, default_rtol),
            # test odd value
            (sample_size_odd, sample_size_odd, [], [], -5.,
             default_rtol, default_rtol),
            # test odd value
            (x.shape, sample_size_odd, [x],
             [numpy.zeros(sample_size_odd, dtype=config.floatX)],
             -5., default_rtol, default_rtol),
            (sample_size, sample_size, [], [],
             numpy.arange(numpy.prod(sample_size),
                          dtype='float32').reshape(sample_size),
             10. * std / numpy.sqrt(steps), default_rtol),
            # test empty size (scalar)
            ((), (), [], [], -5., default_rtol, 0.02),
            # test with few samples at the same time
            ((1,), (1,), [], [], -5., default_rtol, 0.02),
            ((2,), (2,), [], [], -5., default_rtol, 0.02),
            ((3,), (3,), [], [], -5., default_rtol, 0.02),
    ]:
        #print ''
        #print 'ON CPU:'
        R = MRG_RandomStreams(234, use_cuda=False)
        # Note: we specify `nstreams` to avoid a warning.
        n = R.normal(size=size, avg=avg, std=std,
                     nstreams=rng_mrg.guess_n_streams(size, warn=False))
        f = theano.function(var_input, n, mode=mode)
        #theano.printing.debugprint(f)
        out = f(*input)
        #print 'random?[:10]\n', out[0, 0:10]
        # Increase the number of steps if size implies only a few samples
        if numpy.prod(const_size) < 10:
            steps_ = steps * 50
        else:
            steps_ = steps
        basictest(f, steps_, const_size, target_avg=avg, target_std=std,
                  prefix='mrg ', allow_01=True, inputs=input,
                  mean_rtol=rtol, std_tol=std_tol)
        sys.stdout.flush()
        if mode != 'FAST_COMPILE' and cuda_available:
            #print ''
            #print 'ON GPU:'
            R = MRG_RandomStreams(234, use_cuda=True)
            n = R.normal(size=size, avg=avg, std=std, dtype='float32',
                         nstreams=rng_mrg.guess_n_streams(size, warn=False))
            # well, it's really that this test w GPU doesn't make sense otw
            assert n.dtype == 'float32'
            f = theano.function(var_input, theano.Out(
                theano.sandbox.cuda.basic_ops.gpu_from_host(n),
                borrow=True), mode=mode_with_gpu)
            #theano.printing.debugprint(f)
            sys.stdout.flush()
            gpu_out = numpy.asarray(f(*input))
            #print 'random?[:10]\n', gpu_out[0, 0:10]
            #print '----'
            sys.stdout.flush()
            basictest(f, steps_, const_size, target_avg=avg, target_std=std,
                      prefix='gpu mrg ', allow_01=True, inputs=input,
                      mean_rtol=rtol, std_tol=std_tol)
            # Need to allow some rounding error as there is float
            # computation that is done on the gpu vs cpu
            assert numpy.allclose(out, gpu_out, rtol=5e-6, atol=5e-6)
        #print ''
        #print 'ON CPU w NUMPY:'
        RR = theano.tensor.shared_randomstreams.RandomStreams(234)
        nn = RR.normal(size=size, avg=avg, std=std)
        ff = theano.function(var_input, nn)
        basictest(ff, steps_, const_size, target_avg=avg, target_std=std,
                  prefix='numpy ', allow_01=True, inputs=input, mean_rtol=rtol)
Contributor: Donghuan, Project: Theano, Lines of code: 97, Source: test_rng_mrg.py
Example 4: test_binomial
def test_binomial():
    # TODO: test size=None, ndim=X
    # TODO: test size=X, ndim!=X.ndim
    # TODO: test random seed in legal value(!=0 and other)
    # TODO: test sample_size not a multiple of guessed #streams
    # TODO: test size=Var, with shape that change from call to call
    # we test size in a tuple of int and a tensor.shape.
    # we test the param p with int.
    if (mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE'] or
            mode == 'Mode' and config.linker in ['py']):
        sample_size = (10, 50)
        steps = 50
        rtol = 0.02
    else:
        sample_size = (500, 50)
        steps = int(1e3)
        rtol = 0.01
    x = tensor.matrix()
    v = tensor.vector()
    for mean in [0.1, 0.5]:
        for size, const_size, var_input, input in [
                (sample_size, sample_size, [], []),
                (x.shape, sample_size, [x],
                 [numpy.zeros(sample_size, dtype=config.floatX)]),
                ((x.shape[0], sample_size[1]), sample_size, [x],
                 [numpy.zeros(sample_size, dtype=config.floatX)]),
                # test empty size (scalar)
                ((), (), [], []),
        ]:
            #print ''
            #print 'ON CPU with size=(%s) and mean(%d):' % (str(size), mean)
            R = MRG_RandomStreams(234, use_cuda=False)
            # Note: we specify `nstreams` to avoid a warning.
            u = R.binomial(size=size, p=mean,
                           nstreams=rng_mrg.guess_n_streams(size, warn=False))
            f = theano.function(var_input, u, mode=mode)
            #theano.printing.debugprint(f)
            out = f(*input)
            #print 'random?[:10]\n', out[0, 0:10]
            #print 'random?[-1,-10:]\n', out[-1, -10:]
            # Increase the number of steps if size implies only a few samples
            if numpy.prod(const_size) < 10:
                steps_ = steps * 100
            else:
                steps_ = steps
            basictest(f, steps_, const_size, prefix='mrg cpu',
                      inputs=input, allow_01=True,
                      target_avg=mean, mean_rtol=rtol)
            if mode != 'FAST_COMPILE' and cuda_available:
                #print ''
                #print 'ON GPU with size=(%s) and mean(%d):' % (str(size), mean)
                R = MRG_RandomStreams(234, use_cuda=True)
                u = R.binomial(size=size, p=mean, dtype='float32',
                               nstreams=rng_mrg.guess_n_streams(size,
                                                                warn=False))
                # well, it's really that this test w GPU doesn't make sense otw
                assert u.dtype == 'float32'
                f = theano.function(var_input, theano.Out(
                    theano.sandbox.cuda.basic_ops.gpu_from_host(u),
                    borrow=True), mode=mode_with_gpu)
                #theano.printing.debugprint(f)
                gpu_out = numpy.asarray(f(*input))
                #print 'random?[:10]\n', gpu_out[0, 0:10]
                #print 'random?[-1,-10:]\n', gpu_out[-1, -10:]
                basictest(f, steps_, const_size, prefix='mrg gpu',
                          inputs=input, allow_01=True,
                          target_avg=mean, mean_rtol=rtol)
                numpy.testing.assert_array_almost_equal(out, gpu_out,
                                                        decimal=6)
            #print ''
            #print 'ON CPU w NUMPY with size=(%s) and mean(%d):' % (str(size),
            #                                                       mean)
            RR = theano.tensor.shared_randomstreams.RandomStreams(234)
            uu = RR.binomial(size=size, p=mean)
            ff = theano.function(var_input, uu, mode=mode)
            # It's not our problem if numpy generates 0 or 1
            basictest(ff, steps_, const_size, prefix='numpy', allow_01=True,
                      inputs=input, target_avg=mean, mean_rtol=rtol)
Contributor: Donghuan, Project: Theano, Lines of code: 85, Source: test_rng_mrg.py
Example 5: test_uniform
def test_uniform():
    # TODO: test param low, high
    # TODO: test size=None
    # TODO: test ndim!=size.ndim
    # TODO: test bad seed
    # TODO: test size=Var, with shape that change from call to call
    if (mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE'] or
            mode == 'Mode' and config.linker in ['py']):
        sample_size = (10, 100)
        steps = 50
    else:
        sample_size = (500, 50)
        steps = int(1e3)
    x = tensor.matrix()
    for size, const_size, var_input, input in [
            (sample_size, sample_size, [], []),
            (x.shape, sample_size, [x],
             [numpy.zeros(sample_size, dtype=config.floatX)]),
            ((x.shape[0], sample_size[1]), sample_size, [x],
             [numpy.zeros(sample_size, dtype=config.floatX)]),
            # test empty size (scalar)
            ((), (), [], []),
    ]:
        #### TEST CPU IMPLEMENTATION ####
        # The python and C implementation are tested with DebugMode
        #print ''
        #print 'ON CPU with size=(%s):' % str(size)
        x = tensor.matrix()
        R = MRG_RandomStreams(234, use_cuda=False)
        # Note: we specify `nstreams` to avoid a warning.
        # TODO Look for all occurrences of `guess_n_streams` and `30 * 256`
        # for such situations: it would be better to instead filter the
        # warning using the warning module.
        u = R.uniform(size=size,
                      nstreams=rng_mrg.guess_n_streams(size, warn=False))
        f = theano.function(var_input, u, mode=mode)
        assert any([isinstance(node.op, theano.sandbox.rng_mrg.mrg_uniform)
                    for node in f.maker.fgraph.toposort()])
        #theano.printing.debugprint(f)
        cpu_out = f(*input)
        #print 'CPU: random?[:10], random?[-10:]'
        #print cpu_out[0, 0:10]
        #print cpu_out[-1, -10:]
        # Increase the number of steps if size implies only a few samples
        if numpy.prod(const_size) < 10:
            steps_ = steps * 100
        else:
            steps_ = steps
        basictest(f, steps_, const_size, prefix='mrg cpu', inputs=input)
        if mode != 'FAST_COMPILE' and cuda_available:
            #print ''
            #print 'ON GPU with size=(%s):' % str(size)
            R = MRG_RandomStreams(234, use_cuda=True)
            u = R.uniform(size=size, dtype='float32',
                          nstreams=rng_mrg.guess_n_streams(size, warn=False))
            # well, it's really that this test w GPU doesn't make sense otw
            assert u.dtype == 'float32'
            f = theano.function(var_input, theano.Out(
                theano.sandbox.cuda.basic_ops.gpu_from_host(u),
                borrow=True), mode=mode_with_gpu)
            assert any([isinstance(node.op,
                                   theano.sandbox.rng_mrg.GPU_mrg_uniform)
                        for node in f.maker.fgraph.toposort()])
            #theano.printing.debugprint(f)
            gpu_out = numpy.asarray(f(*input))
            #print 'GPU: random?[:10], random?[-10:]'
            #print gpu_out[0, 0:10]
            #print gpu_out[-1, -10:]
            basictest(f, steps_, const_size, prefix='mrg gpu', inputs=input)
            numpy.testing.assert_array_almost_equal(cpu_out, gpu_out,
                                                    decimal=6)
        #print ''
        #print 'ON CPU w Numpy with size=(%s):' % str(size)
        RR = theano.tensor.shared_randomstreams.RandomStreams(234)
        uu = RR.uniform(size=size)
        ff = theano.function(var_input, uu, mode=mode)
        # It's not our problem if numpy generates 0 or 1
        basictest(ff, steps_, const_size, prefix='numpy',
                  allow_01=True, inputs=input)
Contributor: Donghuan, Project: Theano, Lines of code: 88, Source: test_rng_mrg.py
Example 6: test_normal_truncation
def test_normal_truncation():
    # just a copy of test_normal0 with extra bound check
    steps = 50
    std = 2.
    # standard deviation is slightly less than for a regular Gaussian
    # constant taken from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
    target_std = .87962566103423978 * std
    if (config.mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE'] or
            config.mode == 'Mode' and config.linker in ['py']):
        sample_size = (25, 30)
        default_rtol = .02
    else:
        sample_size = (999, 50)
        default_rtol = .01
    sample_size_odd = (sample_size[0], sample_size[1] - 1)
    x = tensor.matrix()
    test_cases = [
        (sample_size, sample_size, [], [], -5., default_rtol, default_rtol),
        (x.shape, sample_size, [x],
         [np.zeros(sample_size, dtype=config.floatX)],
         -5., default_rtol, default_rtol),
        # test odd value
        (x.shape, sample_size_odd, [x],
         [np.zeros(sample_size_odd, dtype=config.floatX)],
         -5., default_rtol, default_rtol),
        (sample_size, sample_size, [], [],
         np.arange(np.prod(sample_size),
                   dtype='float32').reshape(sample_size),
         10. * std / np.sqrt(steps), default_rtol),
        # test empty size (scalar)
        ((), (), [], [], -5., default_rtol, 0.02),
        # test with few samples at the same time
        ((1,), (1,), [], [], -5., default_rtol, 0.02),
        ((3,), (3,), [], [], -5., default_rtol, 0.02),
    ]
    for size, const_size, var_input, input, avg, rtol, std_tol in test_cases:
        R = MRG_RandomStreams(234)
        # Note: we specify `nstreams` to avoid a warning.
        n = R.normal(size=size, avg=avg, std=std, truncate=True,
                     nstreams=rng_mrg.guess_n_streams(size, warn=False))
        f = theano.function(var_input, n)
        # check if truncated at 2*std
        samples = f(*input)
        assert np.all(avg + 2 * std - samples >= 0), \
            ("bad upper bound? %s %s" % (samples, avg + 2 * std))
        assert np.all(samples - (avg - 2 * std) >= 0), \
            ("bad lower bound? %s %s" % (samples, avg - 2 * std))
        # Increase the number of steps if size implies only a few samples
        if np.prod(const_size) < 10:
            steps_ = steps * 50
        else:
            steps_ = steps
        basictest(f, steps_, const_size, target_avg=avg, target_std=target_std,
                  prefix='mrg ', allow_01=True, inputs=input,
                  mean_rtol=rtol, std_tol=std_tol)
        sys.stdout.flush()
Contributor: DEVESHTARASIA, Project: Theano, Lines of code: 62, Source: test_rng_mrg.py
Example 7: test_normal0
def test_normal0():
    steps = 50
    std = 2.0
    if mode in ["DEBUG_MODE", "DebugMode", "FAST_COMPILE"]:
        sample_size = (25, 30)
        default_rtol = 0.02
    else:
        sample_size = (999, 50)
        default_rtol = 0.01
    sample_size_odd = (sample_size[0], sample_size[1] - 1)
    x = tensor.matrix()
    for size, const_size, var_input, input, avg, rtol in [
        (sample_size, sample_size, [], [], -5.0, default_rtol),
        (x.shape, sample_size, [x], [numpy.zeros(sample_size, dtype=config.floatX)], -5.0, default_rtol),
        (sample_size_odd, sample_size_odd, [], [], -5.0, default_rtol),  # test odd value
        (
            x.shape,
            sample_size_odd,
            [x],
            [numpy.zeros(sample_size_odd, dtype=config.floatX)],
            -5.0,
            default_rtol,
        ),  # test odd value
        (
            sample_size,
            sample_size,
            [],
            [],
            numpy.arange(numpy.prod(sample_size), dtype="float32").reshape(sample_size),
            10.0 * std / numpy.sqrt(steps),
        ),
    ]:
        print ""
        print "ON CPU:"
        R = MRG_RandomStreams(234, use_cuda=False)
        # Note: we specify `nstreams` to avoid a warning.
        n = R.normal(size=size, avg=avg, std=std, nstreams=rng_mrg.guess_n_streams(size, warn=False))
        f = theano.function(var_input, n, mode=mode)
        theano.printing.debugprint(f)
        out = f(*input)
        print "random?[:10]\n", out[0, 0:10]
        basictest(
            f,
            steps,
            const_size,
            target_avg=avg,
            target_std=std,
            prefix="mrg ",
            allow_01=True,
            inputs=input,
            mean_rtol=rtol,
        )
        sys.stdout.flush()
        if mode != "FAST_COMPILE" and cuda_available:
            print ""
            print "ON GPU:"
            R = MRG_RandomStreams(234, use_cuda=True)
            n = R.normal(
                size=size, avg=avg, std=std, dtype="float32", nstreams=rng_mrg.guess_n_streams(size, warn=False)
            )
            assert n.dtype == "float32"  # well, it's really that this test w GPU doesn't make sense otw
            f = theano.function(
                var_input, theano.Out(theano.sandbox.cuda.basic_ops.gpu_from_host(n), borrow=True), mode=mode_with_gpu
            )
            theano.printing.debugprint(f)
            sys.stdout.flush()
            gpu_out = numpy.asarray(f(*input))
            print "random?[:10]\n", gpu_out[0, 0:10]
            print "----"
            sys.stdout.flush()
            basictest(
                f,
                steps,
                const_size,
                target_avg=avg,
                target_std=std,
                prefix="gpu mrg ",
                allow_01=True,
                inputs=input,
                mean_rtol=rtol,
            )
            # Need to allow some rounding error as there is float
            # computation that is done on the gpu vs cpu
            assert numpy.allclose(out, gpu_out, rtol=5e-6, atol=5e-6)
        print ""
        print "ON CPU w NUMPY:"
        RR = theano.tensor.shared_randomstreams.RandomStreams(234)
        nn = RR.normal(size=size, avg=avg, std=std)
        ff = theano.function(var_input, nn)
        basictest(
            ff,
            steps,
            # ......... the rest of this example is omitted .........
Contributor: hamelphi, Project: Theano, Lines of code: 101, Source: test_rng_mrg.py
Example 8: test_binomial
def test_binomial():
    # TODO: test size=None, ndim=X
    # TODO: test size=X, ndim!=X.ndim
    # TODO: test random seed in legal value(!=0 and other)
    # TODO: test sample_size not a multiple of guessed #streams
    # TODO: test size=Var, with shape that change from call to call
    # we test size in a tuple of int and a tensor.shape.
    # we test the param p with int.
    if mode in ["DEBUG_MODE", "DebugMode", "FAST_COMPILE"]:
        sample_size = (10, 50)
        steps = 50
        rtol = 0.02
    else:
        sample_size = (500, 50)
        steps = int(1e3)
        rtol = 0.01
    x = tensor.matrix()
    v = tensor.vector()
    for mean in [0.1, 0.5]:
        for size, var_input, input in [
            (sample_size, [], []),
            (x.shape, [x], [numpy.zeros(sample_size, dtype=config.floatX)]),
        ]:
            print ""
            print "ON CPU with size=(%s) and mean(%d):" % (str(size), mean)
            R = MRG_RandomStreams(234, use_cuda=False)
            # Note: we specify `nstreams` to avoid a warning.
            u = R.binomial(size=size, p=mean, nstreams=rng_mrg.guess_n_streams(size, warn=False))
            f = theano.function(var_input, u, mode=mode)
            theano.printing.debugprint(f)
            out = f(*input)
            print "random?[:10]\n", out[0, 0:10]
            print "random?[-1,-10:]\n", out[-1, -10:]
            basictest(
                f, steps, sample_size, prefix="mrg cpu", inputs=input, allow_01=True, target_avg=mean, mean_rtol=rtol
            )
            if mode != "FAST_COMPILE" and cuda_available:
                print ""
                print "ON GPU with size=(%s) and mean(%d):" % (str(size), mean)
                R = MRG_RandomStreams(234, use_cuda=True)
                u = R.binomial(size=size, p=mean, dtype="float32", nstreams=rng_mrg.guess_n_streams(size, warn=False))
                assert u.dtype == "float32"  # well, it's really that this test w GPU doesn't make sense otw
                f = theano.function(
                    var_input,
                    theano.Out(theano.sandbox.cuda.basic_ops.gpu_from_host(u), borrow=True),
                    mode=mode_with_gpu,
                )
                theano.printing.debugprint(f)
                gpu_out = numpy.asarray(f(*input))
                print "random?[:10]\n", gpu_out[0, 0:10]
                print "random?[-1,-10:]\n", gpu_out[-1, -10:]
                basictest(
                    f,
                    steps,
                    sample_size,
                    prefix="mrg gpu",
                    inputs=input,
                    allow_01=True,
                    target_avg=mean,
                    mean_rtol=rtol,
                )
                numpy.testing.assert_array_almost_equal(out, gpu_out, decimal=6)
            print ""
            print "ON CPU w NUMPY with size=(%s) and mean(%d):" % (str(size), mean)
            RR = theano.tensor.shared_randomstreams.RandomStreams(234)
            uu = RR.binomial(size=size, p=mean)
            ff = theano.function(var_input, uu, mode=mode)
            # It's not our problem if numpy generates 0 or 1
            basictest(
                ff, steps, sample_size, prefix="numpy", allow_01=True, inputs=input, target_avg=mean, mean_rtol=rtol
            )
Contributor: hamelphi, Project: Theano, Lines of code: 77, Source: test_rng_mrg.py
Example 9: test_uniform
def test_uniform():
    # TODO: test param low, high
    # TODO: test size=None
    # TODO: test ndim!=size.ndim
    # TODO: test bad seed
    # TODO: test size=Var, with shape that change from call to call
    if mode in ["DEBUG_MODE", "DebugMode", "FAST_COMPILE"]:
        sample_size = (10, 100)
        steps = 50
    else:
        sample_size = (500, 50)
        steps = int(1e3)
    x = tensor.matrix()
    for size, var_input, input in [
        (sample_size, [], []),
        (x.shape, [x], [numpy.zeros(sample_size, dtype=config.floatX)]),
    ]:
        #### TEST CPU IMPLEMENTATION ####
        # The python and C implementation are tested with DebugMode
        print ""
        print "ON CPU with size=(%s):" % str(size)
        x = tensor.matrix()
        R = MRG_RandomStreams(234, use_cuda=False)
        # Note: we specify `nstreams` to avoid a warning.
        # TODO Look for all occurrences of `guess_n_streams` and `30 * 256`
        # for such situations: it would be better to instead filter the
        # warning using the warning module.
        u = R.uniform(size=size, nstreams=rng_mrg.guess_n_streams(size, warn=False))
        f = theano.function(var_input, u, mode=mode)
        assert any([isinstance(node.op, theano.sandbox.rng_mrg.mrg_uniform) for node in f.maker.env.toposort()])
        theano.printing.debugprint(f)
        cpu_out = f(*input)
        print "CPU: random?[:10], random?[-10:]"
        print cpu_out[0, 0:10]
        print cpu_out[-1, -10:]
        basictest(f, steps, sample_size, prefix="mrg cpu", inputs=input)
        if mode != "FAST_COMPILE" and cuda_available:
            print ""
            print "ON GPU with size=(%s):" % str(size)
            R = MRG_RandomStreams(234, use_cuda=True)
            u = R.uniform(size=size, dtype="float32", nstreams=rng_mrg.guess_n_streams(size, warn=False))
            assert u.dtype == "float32"  # well, it's really that this test w GPU doesn't make sense otw
            f = theano.function(
                var_input, theano.Out(theano.sandbox.cuda.basic_ops.gpu_from_host(u), borrow=True), mode=mode_with_gpu
            )
            assert any([isinstance(node.op, theano.sandbox.rng_mrg.GPU_mrg_uniform) for node in f.maker.env.toposort()])
            theano.printing.debugprint(f)
            gpu_out = numpy.asarray(f(*input))
            print "GPU: random?[:10], random?[-10:]"
            print gpu_out[0, 0:10]
            print gpu_out[-1, -10:]
            basictest(f, steps, sample_size, prefix="mrg gpu", inputs=input)
            numpy.testing.assert_array_almost_equal(cpu_out, gpu_out, decimal=6)
        print ""
        print "ON CPU w Numpy with size=(%s):" % str(size)
        RR = theano.tensor.shared_randomstreams.RandomStreams(234)
        uu = RR.uniform(size=size)
        ff = theano.function(var_input, uu, mode=mode)
        # It's not our problem if numpy generates 0 or 1
        basictest(ff, steps, sample_size, prefix="numpy", allow_01=True, inputs=input)
Contributor: hamelphi, Project: Theano, Lines of code: 68, Source: test_rng_mrg.py
Note: The theano.sandbox.rng_mrg.guess_n_streams examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by many programmers, and copyright remains with the original authors. Please follow the corresponding project's license when redistributing or using the code; do not reproduce without permission.