Theano sparse throws error on sampling

d=-1
acs=[0,.1,.3]
bcs=[0,-.1,-.2]

fx=[]
sx=[]

for i in xrange(200):
    tx=[0 for i in xrange(4)]
    ai=random.randrange(3)
    bi=random.randrange(3)
    if ai>0:
        tx[ai-1]=1
    if bi>0:
        tx[bi+1]=1
    dr=scipy.stats.norm.cdf(d+acs[ai]+bcs[bi])
    if np.random.uniform()<dr:
        sx.append(tx)
    else:
        fx.append(tx)

#above just generates sparse matrix for testing
sx=np.asarray(sx)
fx=np.asarray(fx)

#sxs= theano.sparse.as_sparse_variable(scipy.sparse.csr_matrix(sx))
#fxs= theano.sparse.as_sparse_variable(scipy.sparse.csr_matrix(fx))
sxs= (scipy.sparse.csr_matrix(sx))
fxs= (scipy.sparse.csr_matrix(fx))

with pm.Model() as model:
    inter=pm.Normal('d',sd=20)
    bs=pm.Normal('b',sd=20,shape=4)
    
    fz=pm.Bound(pm.Normal,upper=0.0)('fz',mu=theano.sparse.dot(fxs,bs)+inter,sd=1.0,shape=fx.shape[0])
    sz=pm.Bound(pm.Normal,lower=0.0)('sz',mu=theano.sparse.dot(sxs,bs)+inter,sd=1.0,shape=sx.shape[0])
    #fz=pm.Bound(pm.Normal,upper=0.0)('fz',mu=theano.dot(fx,bs)+inter,sd=1.0,shape=fx.shape[0])
    #sz=pm.Bound(pm.Normal,lower=0.0)('sz',mu=theano.dot(sx,bs)+inter,sd=1.0,shape=sx.shape[0])

    
    trace=pm.sample(3000,cores=2)


plt.figure(figsize=(7, 7))
pm.traceplot(trace,varnames=['b','d'])
plt.tight_layout()
plt.show()

Warning (from warnings module):
  File "/usr/local/lib/python2.7/dist-packages/h5py/__init__.py", line 36
    from ._conv import register_converters as _register_converters
FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...

Traceback (most recent call last):
  File "/home/ryan/Desktop/derping/josh-project/test_pymc.py", line 112, in <module>
    trace=pm.sample(3000,cores=2)
  File "/usr/local/lib/python2.7/dist-packages/pymc3/sampling.py", line 395, in sample
    progressbar=progressbar, **args)
  File "/usr/local/lib/python2.7/dist-packages/pymc3/sampling.py", line 1388, in init_nuts
    step = pm.NUTS(potential=potential, model=model, **kwargs)
  File "/usr/local/lib/python2.7/dist-packages/pymc3/step_methods/hmc/nuts.py", line 152, in __init__
    super(NUTS, self).__init__(vars, **kwargs)
  File "/usr/local/lib/python2.7/dist-packages/pymc3/step_methods/hmc/base_hmc.py", line 63, in __init__
    dtype=dtype, **theano_kwargs)
  File "/usr/local/lib/python2.7/dist-packages/pymc3/step_methods/arraystep.py", line 215, in __init__
    vars, dtype=dtype, **theano_kwargs)
  File "/usr/local/lib/python2.7/dist-packages/pymc3/model.py", line 708, in logp_dlogp_function
    return ValueGradFunction(self.logpt, grad_vars, extra_vars, **kwargs)
  File "/usr/local/lib/python2.7/dist-packages/pymc3/model.py", line 441, in __init__
    grad = tt.grad(self._cost_joined, self._vars_joined)
  File "/usr/local/lib/python2.7/dist-packages/theano/gradient.py", line 605, in grad
    grad_dict, wrt, cost_name)
  File "/usr/local/lib/python2.7/dist-packages/theano/gradient.py", line 1371, in _populate_grad_dict
    rval = [access_grad_cache(elem) for elem in wrt]
  File "/usr/local/lib/python2.7/dist-packages/theano/gradient.py", line 1326, in access_grad_cache
    term = access_term_cache(node)[idx]
  File "/usr/local/lib/python2.7/dist-packages/theano/gradient.py", line 1021, in access_term_cache
    output_grads = [access_grad_cache(var) for var in node.outputs]
  File "/usr/local/lib/python2.7/dist-packages/theano/gradient.py", line 1326, in access_grad_cache
    term = access_term_cache(node)[idx]
  File "/usr/local/lib/python2.7/dist-packages/theano/gradient.py", line 1021, in access_term_cache
    output_grads = [access_grad_cache(var) for var in node.outputs]
  File "/usr/local/lib/python2.7/dist-packages/theano/gradient.py", line 1326, in access_grad_cache
    term = access_term_cache(node)[idx]
  File "/usr/local/lib/python2.7/dist-packages/theano/gradient.py", line 1162, in access_term_cache
    new_output_grads)
  File "/usr/local/lib/python2.7/dist-packages/theano/gof/op.py", line 711, in L_op
    return self.grad(inputs, output_grads)
  File "/usr/local/lib/python2.7/dist-packages/theano/sparse/basic.py", line 4057, in grad
    rval.append(tensor.dot(gz, y.T))
  File "/usr/local/lib/python2.7/dist-packages/theano/tensor/basic.py", line 6086, in dot
    return _dot(a, b)
  File "/usr/local/lib/python2.7/dist-packages/theano/gof/op.py", line 674, in __call__
    required = thunk()
  File "/usr/local/lib/python2.7/dist-packages/theano/gof/op.py", line 892, in rval
    r = p(n, [x[0] for x in i], o)
  File "/usr/local/lib/python2.7/dist-packages/theano/tensor/basic.py", line 5951, in perform
    z[0] = np.asarray(np.dot(x, y))
ValueError: shapes (164,) and (4,) not aligned: 164 (dim 0) != 4 (dim 0)

The code runs with the above error. However when I replace all the sparse operations with not sparse operations, by using the commented out definitions of sz and fz the code then runs without issue. Can someone help me get sparse matrices working?

I am using
python 2.7
Theano 1.01
pymc 3.4rc1

You are quite close - just need to be more careful of the shape:

    bs = pm.Normal('b', sd=20, shape=(fx.shape[1], 1))
    fz = pm.Bound(pm.Normal, upper=0.0)('fz', mu=theano.sparse.dot(
        fxs, bs) + inter, sd=1.0, shape=(fx.shape[0], 1))
    sz = pm.Bound(pm.Normal, lower=0.0)('sz', mu=theano.sparse.dot(
        sxs, bs) + inter, sd=1.0, shape=(sx.shape[0], 1))

Thank You! It worked.