import tensorflow as tf
import numpy as np


# Define a simple feed-forward model with a Dropout layer between the hidden and output layers
class MC_Dropout_Model(tf.keras.Model):
    def __init__(self, input_shape, hidden_units, output_units, dropout_rate=0.5):
        super(MC_Dropout_Model, self).__init__()
        self.fc1 = tf.keras.layers.Dense(hidden_units, activation='relu', input_shape=input_shape)
        self.dropout = tf.keras.layers.Dropout(dropout_rate)
        self.fc2 = tf.keras.layers.Dense(output_units)

    def call(self, inputs, training=False):
        x = self.fc1(inputs)
        # Dropout is only active when training=True is passed; MC Dropout exploits
        # this by passing training=True at inference time as well
        x = self.dropout(x, training=training)
        return self.fc2(x)


# Function to perform MC Dropout inference
def mc_dropout_predict(model, x, n_samples=100):
    predictions = []
    for _ in range(n_samples):
        # Forward pass with dropout kept active (training=True)
        predictions.append(model(x, training=True))

    # Stack predictions across samples and compute their mean and variance
    predictions = tf.stack(predictions)
    mean_prediction = tf.reduce_mean(predictions, axis=0)
    uncertainty = tf.math.reduce_variance(predictions, axis=0)

    return mean_prediction, uncertainty


# Example usage
if __name__ == "__main__":
    # Model hyperparameters
    input_shape = (10,)
    hidden_units = 20
    output_units = 1
    dropout_rate = 0.5
    n_samples = 100

    # Create the model instance
    model = MC_Dropout_Model(input_shape=input_shape,
                             hidden_units=hidden_units,
                             output_units=output_units,
                             dropout_rate=dropout_rate)

    # Dummy input tensor: batch size of 1, input dimension of 10
    x = tf.random.normal((1, 10))

    # Run MC Dropout to get the mean prediction and its uncertainty
    mean_prediction, uncertainty = mc_dropout_predict(model, x, n_samples=n_samples)

    print("Mean Prediction:", mean_prediction.numpy())
    print("Uncertainty (Variance):", uncertainty.numpy())