import numpy as np
import pandas as pd

import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import load_model

%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt

Heteroskedastic Data
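The data are drawn from y = 5x + 4 + 2x·ε with ε ~ N(0, 1), so the noise standard deviation scales with |x|: the scatter around the underlying line widens as x grows, which is what makes the data heteroskedastic.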

np.random.seed(42)
x = np.linspace(-0.5, 1, 100)
y = 5*x + 4 + 2*np.multiply(x, np.random.randn(100))
plt.scatter(x, y,s=20)
plt.xlabel("x")
plt.ylabel("y")
class MonteCarloDropout(Dropout):
    def call(self, inputs):
        return super().call(inputs, training=True)
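Overriding call with training=True keeps dropout active at prediction time, so repeated forward passes through the same trained weights return different outputs; that stochasticity is what the Monte Carlo estimates below rely on. A quick illustrative check (the throwaway model here is hypothetical, not one of the numbered models):

# Sketch: with MC dropout, two predict() calls on the same inputs generally
# differ, because units are still being dropped at inference time.
demo = Sequential([Dense(8, input_shape=(1,)), MonteCarloDropout(rate=0.5), Dense(1)])
x_demo = np.array([[0.0], [0.5], [1.0]])
print(demo.predict(x_demo))
print(demo.predict(x_demo))  # different values on each call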

Model 1

model = Sequential()
model.add(Dense(input_shape=(1,), units=1))
model.summary()
Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 dense (Dense)               (None, 1)                 2         
                                                                 
=================================================================
Total params: 2
Trainable params: 2
Non-trainable params: 0
_________________________________________________________________
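With one input feature and a single unit, this first model is plain linear regression: one weight and one bias, which is where the two parameters in the summary come from.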
model.compile(loss='mse', optimizer='adam')
model.fit(x, y, epochs=4000, verbose=0)
plt.scatter(x, y, s=20, alpha=0.6)
plt.xlabel("x")
plt.ylabel("y")
pred_m1 = model.predict(x)
plt.plot(x, pred_m1)

Model 2

model2 = Sequential()
model2.add(Dense(input_shape=(1,), units=1))
model2.add(MonteCarloDropout(rate=0.25))
model2.compile(loss='mse', optimizer='adam')
model2.fit(x, y, epochs=4000, verbose=0)
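With dropout still active at prediction time, each of the T forward passes samples a different sub-network. Their mean is used as the prediction, and their standard deviation across passes serves as the uncertainty estimate, plotted as a band around the mean.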
T = 100  # number of stochastic forward passes
predictions = np.stack([model2.predict(x) for _ in range(T)])
mean = predictions.mean(axis=0)[:, 0]
std = predictions.std(axis=0)[:, 0]
def plot(x, y, mean, std):
    plt.scatter(x, y, s=20, alpha=0.6)
    plt.xlabel("x")
    plt.ylabel("y")
    plt.plot(x, mean, color='k')
    plt.fill_between(x, mean - std, mean + std, color='k', alpha=0.4)
plot(x, y, mean, std)
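The band above is a symmetric mean ± 1 std interval. As a small variation (a sketch, not part of the original notebook), the same stack of T predictions can be summarised with empirical quantiles, which makes no symmetry assumption:

# Sketch: an empirical 90% band from the same Monte Carlo predictions,
# using the 5th and 95th percentiles across the T forward passes.
lower = np.percentile(predictions[:, :, 0], 5, axis=0)
upper = np.percentile(predictions[:, :, 0], 95, axis=0)
plt.scatter(x, y, s=20, alpha=0.6)
plt.plot(x, mean, color='k')
plt.fill_between(x, lower, upper, color='k', alpha=0.2)
plt.xlabel("x")
plt.ylabel("y")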

Model 3

model3 = Sequential()
model3.add(Dense(input_shape=(1,), units=300))
model3.add(MonteCarloDropout(rate=0.25))
model3.add(Dense(units=120))
model3.add(MonteCarloDropout(rate=0.25))
model3.add(Dense(units=10))
model3.add(MonteCarloDropout(rate=0.25))
model3.add(Dense(units=1))
model3.compile(loss='mse', optimizer='adam')
model3.fit(x, y, epochs=4000, verbose=0)
T = 100  # number of stochastic forward passes
predictions = np.stack([model3.predict(x) for _ in range(T)])
mean = predictions.mean(axis=0)[:, 0]
std = predictions.std(axis=0)[:, 0]
plot(x, y, mean, std)
Model 4
model4 = Sequential()
model4.add(Dense(input_shape=(1,), units=2, activation='relu'))
model4.add(MonteCarloDropout(rate=0.25))
model4.add(Dense(units=2, activation='relu'))
model4.add(MonteCarloDropout(rate=0.25))
model4.add(Dense(units=2))
model4.add(MonteCarloDropout(rate=0.25))
model4.add(Dense(units=1))
model4.compile(loss='mse', optimizer='adam')
model4.fit(x, y, epochs=4000, verbose=0)
T = 100  # number of stochastic forward passes
predictions = np.stack([model4.predict(x) for _ in range(T)])
mean = predictions.mean(axis=0)[:, 0]
std = predictions.std(axis=0)[:, 0]
plot(x, y, mean, std)
Model 5
model5 = Sequential()
model5.add(Dense(input_shape=(1,), units=300, activation='relu'))
model5.add(MonteCarloDropout(rate=0.25))
model5.add(Dense(units=120, activation='relu'))
model5.add(MonteCarloDropout(rate=0.25))
model5.add(Dense(units=10))
model5.add(MonteCarloDropout(rate=0.25))
model5.add(Dense(units=1))
model5.compile(loss='mse', optimizer='adam')
model5.fit(x, y, epochs=4000, verbose=0)
model5.save("./models/model5.h5")
T = 100  # number of stochastic forward passes
predictions = np.stack([model5.predict(x) for _ in range(T)])
mean = predictions.mean(axis=0)[:, 0]
std = predictions.std(axis=0)[:, 0]
plot(x, y, mean, std)
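Because MonteCarloDropout is a custom layer, the saved file can only be reloaded by passing the class through custom_objects, as done before the out-of-sample animation below.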
Model 6
model6 = Sequential()
model6.add(Dense(input_shape=(1,), units=300, activation='tanh'))
model6.add(MonteCarloDropout(rate=0.25))
model6.add(Dense(units=240, activation='tanh'))
model6.add(MonteCarloDropout(rate=0.25))
model6.add(Dense(units=120))
model6.add(MonteCarloDropout(rate=0.25))
model6.add(Dense(units=60))
model6.add(MonteCarloDropout(rate=0.25))
model6.add(Dense(units=30))
model6.add(MonteCarloDropout(rate=0.25))
model6.add(Dense(units=10))
model6.add(MonteCarloDropout(rate=0.25))
model6.add(Dense(units=1))
model6.compile(loss='mse', optimizer='adam')
model6.fit(x, y, epochs=4000, verbose=0)
T = 100  # number of stochastic forward passes
predictions = np.stack([model6.predict(x) for _ in range(T)])
mean = predictions.mean(axis=0)[:, 0]
std = predictions.std(axis=0)[:, 0]
plot(x, y, mean, std)
 
# The mean/std above are single arrays, but the animation needs one band per
# frame. One way to make this runnable (an assumption about the intent):
# animate the running mean/std as more Monte Carlo samples are accumulated.
num_iteration = predictions.shape[0]
mean = [predictions[:i + 1].mean(axis=0)[:, 0] for i in range(num_iteration)]
std = [predictions[:i + 1].std(axis=0)[:, 0] for i in range(num_iteration)]

def update(iteration):
    ax.cla()
    ax.scatter(x, y, s=20, alpha=0.6)
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.plot(x, mean[iteration], color='k')
    ax.fill_between(x, mean[iteration] - std[iteration], mean[iteration] + std[iteration], color='k', alpha=0.4)
    fig.tight_layout()

from matplotlib.animation import FuncAnimation
from matplotlib import rc
fig, ax = plt.subplots()
anim = FuncAnimation(fig, update, frames=np.arange(0, num_iteration - 1, 1), interval=250)
plt.close()
rc('animation', html='jshtml')
anim
np.random.seed(42)
x_test = np.linspace(-0.5, 1.5, 30)
y_test = 5*x_test + 4 + 2*np.multiply(x_test, np.random.randn(30))
# Reload the saved model; the custom layer must be passed via custom_objects.
model5 = load_model('./models/model5.h5', custom_objects={'MonteCarloDropout': MonteCarloDropout})
# Monte Carlo predictions on the test inputs, which extend beyond the training
# range (x > 1); as above, animate the running mean/std as samples accumulate.
T = 100  # number of stochastic forward passes
predictions_test = np.stack([model5.predict(x_test) for _ in range(T)])
num_iteration = predictions_test.shape[0]
mean_test = [predictions_test[:i + 1].mean(axis=0)[:, 0] for i in range(num_iteration)]
std_test = [predictions_test[:i + 1].std(axis=0)[:, 0] for i in range(num_iteration)]

def update_test(iteration):
    ax.cla()
    ax.scatter(x_test, y_test, s=20, alpha=0.6)
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.plot(x_test, mean_test[iteration], color='k')
    ax.fill_between(x_test, mean_test[iteration] - std_test[iteration], mean_test[iteration] + std_test[iteration], color='k', alpha=0.4)
    ax.axvline(x=1, ls="--")  # end of the training range
    fig.tight_layout()
from matplotlib.animation import FuncAnimation
from matplotlib import rc
fig, ax = plt.subplots()
anim = FuncAnimation(fig, update_test, frames=np.arange(0, num_iteration - 1, 1), interval=250)
plt.close()
rc('animation', html='jshtml')
anim