import numpy as np # Let's import a library that helps us with math
from scipy.special import logsumexp # here is the special function we want

# Hypothesis lengths for the four hypotheses
# (1-Knower, 2-Knower, 3-Knower, CP-knower).
# NOTE(review): the original left this as an exercise ("YOUR CODE HERE"),
# which made the script crash with NameError at the prior computation.
# Example lengths are filled in below — confirm against the tutorial's
# intended values. Only the *relative* lengths matter for the prior.
hyp_lengths = np.array([1., 2., 3., 4.])

# Equation (2) from the tutorial: prior probability is inversely
# proportional to hypothesis length, normalized to sum to 1.
priors = np.power(hyp_lengths, -1.) / np.sum(np.power(hyp_lengths, -1.))

# Let's work in logs (avoids underflow when multiplying tiny probabilities)
log_prior = np.log(priors)

# Remember: log(x*y) = log(x) + log(y)
# Therefore: log(x**3) = log(x*x*x) = log(x) + log(x) + log(x) = 3*log(x)

# Log-likelihood of the N=30 observed trials under each hypothesis:
# each "correct" trial contributes log(0.91), each "error" trial log(0.01).
log_likelihoods_N30 = np.array(
    [20*np.log(0.91) + 10*np.log(0.01),  # 1-Knower
     24*np.log(0.91) + 6*np.log(0.01),   # 2-Knower
     26*np.log(0.91) + 4*np.log(0.01),   # 3-Knower
     30*np.log(0.91) + 0*np.log(0.01)])  # CP-knower

# Numerator of Bayes' rule: log(prior * likelihood) = log_prior + log_lik
log_post_score_N30 = log_prior + log_likelihoods_N30

# Denominator (the evidence): log of the sum of the un-normalized
# posterior scores, computed stably in log space with logsumexp.
log_evidence_N30 = logsumexp(log_post_score_N30)

# "Divide" numerator by denominator = subtract in log space
log_posterior_N30 = log_post_score_N30 - log_evidence_N30

# Exponentiate to get back to ordinary probabilities (sums to 1)
posterior_N30 = np.exp(log_posterior_N30)

print(np.round(posterior_N30, 4))