missing 1 required positional argument: 'mocker'
joshuaspear opened this issue · 3 comments
joshuaspear commented
This question has been transferred from issue #174.
I am having a bit of trouble mocking a method call within a method that I am trying to test. Thanks in advance!
Class attempting to test
from sklearn.ensemble import GradientBoostingClassifier
from typing import Callable
import numpy as np
class BehavEst:
def __init__(self, estimator:Callable) -> None:
self.estimator = estimator
def eval_pdf(self, indep_vals, dep_vals):
raise NotImplementedError
class GbtEst(BehavEst):
def __init__(self, estimator:GradientBoostingClassifier) -> None:
super().__init__(estimator=estimator)
def eval_pdf(self, indep_vals:np.array, dep_vals:np.array):
slct_msk = (np.array(range(0,len(indep_vals))), dep_vals)
probs = self.estimator.predict_proba(X=indep_vals)
return probs[slct_msk]
Test file:
import pytest
import unittest
import numpy as np
from dtr_renal.models.components.policy_eval.BehavEst import (
GbtEst, GradientBoostingClassifier)
import logging
@pytest.fixture(scope='function', autouse=True)
def mocka(obj, mocker):
"""Exposes pytest-mock's "mocker" as "self.mocker" in
unittest.TestCase-based tests.
"""
obj.instance.mocker = mocker
behav_est = GbtEst(estimator=GradientBoostingClassifier())
class GbtEst_Test(unittest.TestCase):
@pytest.mark.usefixtures('mocka')
def test_1_eval_pdf(self, mocker):
# Dependant values range between 1 and 5
num_features = 10
# correct_pred = [
# True, False, False, True, False, True, True
# ]
dep_vals_input = np.array([1,2,1,5,2,4,1])
indep_vals_input = np.arange(0,len(dep_vals_input)*num_features)
indep_vals_input = indep_vals_input.reshape(-1,num_features)
np.random.seed(seed=1)
mocked_value = np.random.uniform(
0,1,len(dep_vals_input)*dep_vals_input.max()).reshape(
len(dep_vals_input), dep_vals_input.max())
actual_res = []
for idx, vals in zip(dep_vals_input, mocked_value):
actual_res.append(vals[idx-1])
actual_res = np.array(actual_res)
dep_vals_input=dep_vals_input.reshape(-1,1)
estimator_mock = mocker.patch("behav_est.estimator.predict_proba")
estimator_mock.return_value = mocked_value
pred_res = behav_est.eval_pdf(
indep_vals=indep_vals_input, dep_vals=dep_vals_input)
self.assertTrue(pred_res==actual_res)
if __name__ == "__main__":
unittest.main()
Stack trace:
======================================================================
ERROR: test_1_eval_pdf (__main__.GbtEst_Test)
----------------------------------------------------------------------
TypeError: test_1_eval_pdf() missing 1 required positional argument: 'mocker'
----------------------------------------------------------------------
Ran 1 test in 0.000s
FAILED (errors=1)
(nsr_env) joshuaspear@Joshuas-MacBook-Pro-3 dtr_renal %
The-Compiler commented
That looks like output from unittest.py. If you want to use pytest features, you need to run your tests with pytest.
miguelsmuller commented
I have the same problem
______ TestSearchSearchAPIUnitTestCase.test_do_search ______
self = <unittest.case._Outcome object at 0x103506308>, test_case = <tests.unit.test_search_search_api.TestSearchSearchAPIUnitTestCase testMethod=test_do_search>, isTest = True
@contextlib.contextmanager
def testPartExecutor(self, test_case, isTest=False):
old_success = self.success
self.success = True
try:
> yield
../../../.pyenv/versions/3.7.12/lib/python3.7/unittest/case.py:59:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <tests.unit.test_search_search_api.TestSearchSearchAPIUnitTestCase testMethod=test_do_search>, result = <TestCaseFunction test_do_search>
def run(self, result=None):
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, self, skip_why)
finally:
result.stopTest(self)
return
expecting_failure_method = getattr(testMethod,
"__unittest_expecting_failure__", False)
expecting_failure_class = getattr(self,
"__unittest_expecting_failure__", False)
expecting_failure = expecting_failure_class or expecting_failure_method
outcome = _Outcome(result)
try:
self._outcome = outcome
with outcome.testPartExecutor(self):
self.setUp()
if outcome.success:
outcome.expecting_failure = expecting_failure
with outcome.testPartExecutor(self, isTest=True):
> testMethod()
E TypeError: test_do_search() missing 4 required positional arguments: 'mock_build_json', 'mock_rearrange_search_engine_result', 'mock_do_search_query', and 'mock_get_search_fields'
../../../.pyenv/versions/3.7.12/lib/python3.7/unittest/case.py:628: TypeError
This is my fixture:
@pytest.fixture()
def mock_get_search_fields(mocker):
mock = Mock()
mocker.patch(
'brainiak.search.search_search_api._get_search_fields',
return_value=mock
)
return mock
And this is my test:
class TestXPTOCase(TestCase):
def test_do_search(self, mock_get_search_fields):
mock_get_search_fields.return_value = ["label"]
expected = ...
computed = ...
self.assertTrue(
all([
mock_get_search_fields.called
])
)
self.assertEqual(expected, computed)
nicoddemus commented
This is not related to pytest-mock, but to the fact that pytest does not support fixtures in TestCase
subclasses, see: https://docs.pytest.org/en/stable/how-to/unittest.html#pytest-features-in-unittest-testcase-subclasses
Closing for now as this is not actionable on pytest-mock.