@@ -566,10 +566,6 @@ def bayes_risk(self, expparams):
has shape ``(expparams.size,)``
"""
- # outcomes for the first experiment
- os = self.model.domain(None).values
- n_out = os.size
-
# for models whose outcome number changes with experiment, we
# take the easy way out and for-loop over experiments
n_eps = expparams.size
@@ -579,6 +575,9 @@ def bayes_risk(self, expparams):
risk[idx] = self.bayes_risk(expparams[idx, np.newaxis])
return risk
+ # outcomes for the first experiment
+ os = self.model.domain(expparams[0, np.newaxis])[0].values
+
# compute the hypothetical weights, likelihoods and normalizations for
# every possible outcome and expparam
# the likelihood over outcomes should sum to 1, so don't compute for last outcome
@@ -628,10 +627,6 @@ def expected_information_gain(self, expparams):
# This is a special case of the KL divergence estimator (see below),
# in which the other distribution is guaranteed to share support.
- # number of outcomes for the first experiment
- os = self.model.domain(None).values
- n_out = os.size
-
# for models whose outcome number changes with experiment, we
# take the easy way out and for-loop over experiments
n_eps = expparams.size
@@ -641,6 +636,9 @@ def expected_information_gain(self, expparams):
risk[idx] = self.expected_information_gain(expparams[idx, np.newaxis])
return risk
+ # number of outcomes for the first experiment
+ os = self.model.domain(expparams[0, np.newaxis])[0].values
+
# compute the hypothetical weights, likelihoods and normalizations for
# every possible outcome and expparam
# the likelihood over outcomes should sum to 1, so don't compute for last outcome
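
A minimal sketch (not part of this commit) of the API distinction the moved lines rely on, assuming the QInfer-style Model/Domain calls shown in the hunks above; `model`, `expparams`, and `first_experiment_outcomes` are hypothetical stand-ins used only for illustration.

    import numpy as np

    def first_experiment_outcomes(model, expparams):
        # model.domain(None) assumes a single outcome domain shared by every
        # experiment; for models whose outcomes depend on the experiment, ask
        # for the domain of a specific experiment row instead. domain(eps)
        # returns one Domain per expparam row, so take [0] and read .values.
        eps = expparams[0, np.newaxis]   # first experiment, kept as a length-1 array
        return model.domain(eps)[0].values

Placing this lookup below the per-experiment for-loop dispatch, as the diff does, means it only runs on the path where a single shared domain is actually assumed.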