Hello. I'm getting the following error on my Bayesian neural network (I'm new to these).
MissingInputError: Input 0 of the graph (indices start from 0), used to compute InplaceDimShuffle{x,x}(bias2), was not provided and not given a value. Use the Theano flag exception_verbosity='high', for more information on this error.
I'm not sure what this input is supposed to be. I've looked through the Theano docs, but nothing jumps out.
The shape of the input and output are:
ann_input: (151437, 53)
ann_output: (1, 151437)
My code is below:
# --- Data setup -------------------------------------------------------------
# Wrap the training data in shared variables so they can appear in the
# Theano/PyMC3 graph.
# NOTE(review): shared variables normally come from `theano.shared`, not
# `theano.tensor` -- this only works if `tt` is aliased to the top-level
# `theano` module. Confirm the import.
ann_input = tt.shared(np.asarray(X_train))

# Transpose the targets so observations line up with the network output.
# assumes y_train is (1, 151437) after transpose -- TODO confirm against
# the likelihood's expected shape below.
y_train_t = y_train.transpose()
ann_output = tt.shared(np.asarray(y_train_t))

n_hidden = 16  # units per hidden layer

# Initialize random starting weights for every layer.
init_1 = np.random.randn(X_train.shape[1], n_hidden)  # input -> hidden 1
# Hidden-to-hidden starting weights (all n_hidden x n_hidden); unpacking a
# generator keeps the draw order identical to fourteen separate statements.
(init_2, init_3, init_4, init_5, init_6, init_7, init_8,
 init_9, init_10, init_11, init_12, init_13, init_14, init_15) = (
    np.random.randn(n_hidden, n_hidden) for _ in range(14)
)
# Starting weights for the hidden -> output layer (vector of length n_hidden).
init_out = np.random.randn(n_hidden)

# NOTE(review): `neual_network` looks like a typo for `neural_network`;
# kept as-is so any later references still resolve.
with pm.Model() as neual_network:
    # Weights from input to first hidden layer.
    weights_in_1 = pm.Normal('w_in_1', 0, sd=1,
                             shape=(X_train.shape[1], n_hidden),
                             testval=init_1)

    # Hidden-to-hidden weight matrices (all n_hidden x n_hidden).
    weights_1_2 = pm.Normal('w_1_2', 0, sd=1, shape=(n_hidden, n_hidden), testval=init_2)
    # BUG FIX: name was '_2_3'; renamed to 'w_2_3' for consistency with the
    # other weight variables.
    weights_2_3 = pm.Normal('w_2_3', 0, sd=1, shape=(n_hidden, n_hidden), testval=init_3)
    weights_3_4 = pm.Normal('w_3_4', 0, sd=1, shape=(n_hidden, n_hidden), testval=init_4)
    weights_4_5 = pm.Normal('w_4_5', 0, sd=1, shape=(n_hidden, n_hidden), testval=init_5)
    weights_5_6 = pm.Normal('w_5_6', 0, sd=1, shape=(n_hidden, n_hidden), testval=init_6)
    weights_6_7 = pm.Normal('w_6_7', 0, sd=1, shape=(n_hidden, n_hidden), testval=init_7)
    weights_7_8 = pm.Normal('w_7_8', 0, sd=1, shape=(n_hidden, n_hidden), testval=init_8)
    weights_8_9 = pm.Normal('w_8_9', 0, sd=1, shape=(n_hidden, n_hidden), testval=init_9)
    weights_9_10 = pm.Normal('w_9_10', 0, sd=1, shape=(n_hidden, n_hidden), testval=init_10)
    weights_10_11 = pm.Normal('w_10_11', 0, sd=1, shape=(n_hidden, n_hidden), testval=init_11)
    weights_11_12 = pm.Normal('w_11_12', 0, sd=1, shape=(n_hidden, n_hidden), testval=init_12)
    weights_12_13 = pm.Normal('w_12_13', 0, sd=1, shape=(n_hidden, n_hidden), testval=init_13)
    weights_13_14 = pm.Normal('w_13_14', 0, sd=1, shape=(n_hidden, n_hidden), testval=init_14)
    weights_14_15 = pm.Normal('w_14_15', 0, sd=1, shape=(n_hidden, n_hidden), testval=init_15)

    # Weights from last hidden layer to the scalar output.
    weights_15_out = pm.Normal('w_15_out', 0, sd=1, shape=(n_hidden,), testval=init_out)

    # A single scalar bias shared by every layer.
    B2 = pm.Normal('bias2', 0., 1.)

    # Build the network; hidden layers use relu activations.
    L1 = pm.Deterministic('Layer1', T.dot(ann_input, weights_in_1) + B2)
    L2 = pm.Deterministic('Layer2', T.nnet.relu(T.dot(L1, weights_1_2) + B2))
    L3 = pm.Deterministic('Layer3', T.nnet.relu(T.dot(L2, weights_2_3) + B2))
    L4 = pm.Deterministic('Layer4', T.nnet.relu(T.dot(L3, weights_3_4) + B2))
    L5 = pm.Deterministic('Layer5', T.nnet.relu(T.dot(L4, weights_4_5) + B2))
    L6 = pm.Deterministic('Layer6', T.nnet.relu(T.dot(L5, weights_5_6) + B2))
    L7 = pm.Deterministic('Layer7', T.nnet.relu(T.dot(L6, weights_6_7) + B2))
    L8 = pm.Deterministic('Layer8', T.nnet.relu(T.dot(L7, weights_7_8) + B2))
    L9 = pm.Deterministic('Layer9', T.nnet.relu(T.dot(L8, weights_8_9) + B2))
    # BUG FIX: name was 'Laye10'.
    L10 = pm.Deterministic('Layer10', T.nnet.relu(T.dot(L9, weights_9_10) + B2))
    L11 = pm.Deterministic('Layer11', T.nnet.relu(T.dot(L10, weights_10_11) + B2))
    L12 = pm.Deterministic('Layer12', T.nnet.relu(T.dot(L11, weights_11_12) + B2))
    L13 = pm.Deterministic('Layer13', T.nnet.relu(T.dot(L12, weights_12_13) + B2))
    # BUG FIX: original read `T.dot(L14, weights_13_14)` inside the definition
    # of L14 itself -- the input must be the previous layer, L13.
    L14 = pm.Deterministic('Layer14', T.nnet.relu(T.dot(L13, weights_13_14) + B2))
    L15 = pm.Deterministic('Layer15', T.nnet.relu(T.dot(L14, weights_14_15) + B2))

    # Network output: one value per training row.
    act_out = T.dot(L15, weights_15_out)

    # Likelihood: observed targets around the network output with unit noise
    # (sd=1 makes the previously-implicit default explicit).
    # NOTE(review): ann_output was built as (1, 151437) while act_out has
    # length 151437 -- confirm the shapes broadcast as intended, or drop the
    # transpose when creating ann_output.
    out = pm.Normal('out', mu=act_out, sd=1, observed=ann_output, shape=y_train.shape)

    inference = pm.ADVI()
    approx = pm.fit(n=50000, method=inference)
    # BUG FIX for the reported MissingInputError: after fitting with ADVI,
    # draw posterior samples from the fitted approximation instead of calling
    # pm.sample (which starts a separate MCMC run; also `progress_bar` is not
    # a pm.sample keyword -- it is `progressbar`).
    trace = approx.sample(draws=5000)