### tests/test_prompt.py
"""Tests for `cookiecutter.prompt` module."""

import json
import platform
import sys
from collections import OrderedDict
from pathlib import Path

import click
import pytest

from cookiecutter import environment, exceptions, prompt


@pytest.fixture(autouse=True)
def patch_readline_on_win(monkeypatch):
    """Fixture. Overwrite windows end of line to linux standard."""
    running_on_windows = 'windows' in platform.platform().lower()
    if running_on_windows:
        # Make stdin hand back a bare newline so prompts behave as on Linux.
        monkeypatch.setattr('sys.stdin.readline', lambda: '\n')


class TestRenderVariable:
    """Class to unite simple and complex tests for render_variable function."""

    @pytest.mark.parametrize(
        'raw_var, rendered_var',
        [
            (1, '1'),
            (True, True),
            ('foo', 'foo'),
            ('{{cookiecutter.project}}', 'foobar'),
            (None, None),
        ],
    )
    def test_convert_to_str(self, mocker, raw_var, rendered_var):
        """Verify simple items correctly rendered to strings."""
        env = environment.StrictEnvironment()
        from_string = mocker.patch(
            'cookiecutter.utils.StrictEnvironment.from_string', wraps=env.from_string
        )

        rendered = prompt.render_variable(env, raw_var, {'project': 'foobar'})
        assert rendered == rendered_var

        # None and booleans skip the template engine entirely; everything
        # else is stringified first and then passed to `from_string`.
        if raw_var is None or isinstance(raw_var, bool):
            assert not from_string.called
        else:
            expected_arg = raw_var if isinstance(raw_var, str) else str(raw_var)
            from_string.assert_called_once_with(expected_arg)

    @pytest.mark.parametrize(
        'raw_var, rendered_var',
        [
            ({1: True, 'foo': False}, {'1': True, 'foo': False}),
            (
                {'{{cookiecutter.project}}': ['foo', 1], 'bar': False},
                {'foobar': ['foo', '1'], 'bar': False},
            ),
            (['foo', '{{cookiecutter.project}}', None], ['foo', 'foobar', None]),
        ],
    )
    def test_convert_to_str_complex_variables(self, raw_var, rendered_var):
        """Verify tree items correctly rendered."""
        rendered = prompt.render_variable(
            environment.StrictEnvironment(), raw_var, {'project': 'foobar'}
        )
        assert rendered == rendered_var


class TestPrompt:
    """Class to unite user prompt related tests."""

    @pytest.mark.parametrize(
        'context',
        [
            {'cookiecutter': {'full_name': 'Your Name'}},
            {'cookiecutter': {'full_name': 'Řekni či napiš své jméno'}},
        ],
        ids=['ASCII default prompt/input', 'Unicode default prompt/input'],
    )
    def test_prompt_for_config(self, monkeypatch, context):
        """Verify `prompt_for_config` call `read_user_variable` on text request."""
        # Echo the default back instead of prompting interactively.
        monkeypatch.setattr(
            'cookiecutter.prompt.read_user_variable',
            lambda var, default, prompts, prefix: default,
        )

        assert prompt.prompt_for_config(context) == context['cookiecutter']

    @pytest.mark.parametrize(
        'context',
        [
            {
                'cookiecutter': {
                    'full_name': 'Your Name',
                    'check': ['yes', 'no'],
                    'nothing': 'ok',
                    '__prompts__': {
                        'full_name': 'Name please',
                        'check': 'Checking',
                    },
                }
            },
        ],
        ids=['ASCII default prompt/input'],
    )
    def test_prompt_for_config_with_human_prompts(self, monkeypatch, context):
        """Verify call `read_user_variable` on request when human-readable prompts."""

        # Stub out every interactive reader so it just returns the default.
        def passthrough(var, default, prompts, prefix):
            return default

        for reader in ('read_user_variable', 'read_user_yes_no', 'read_user_choice'):
            monkeypatch.setattr(f'cookiecutter.prompt.{reader}', passthrough)

        assert prompt.prompt_for_config(context) == context['cookiecutter']

    @pytest.mark.parametrize(
        'context',
        [
            {
                'cookiecutter': {
                    'full_name': 'Your Name',
                    'check': ['yes', 'no'],
                    '__prompts__': {
                        'check': 'Checking',
                    },
                }
            },
            {
                'cookiecutter': {
                    'full_name': 'Your Name',
                    'check': ['yes', 'no'],
                    '__prompts__': {
                        'full_name': 'Name please',
                        'check': {'__prompt__': 'Checking', 'yes': 'Yes', 'no': 'No'},
                    },
                }
            },
            {
                'cookiecutter': {
                    'full_name': 'Your Name',
                    'check': ['yes', 'no'],
                    '__prompts__': {
                        'full_name': 'Name please',
                        'check': {'no': 'No'},
                    },
                }
            },
        ],
    )
    def test_prompt_for_config_with_human_choices(self, monkeypatch, context):
        """Test prompts when human-readable labels for user choices."""
        # Feed one newline per question so every prompt takes its default.
        with click.testing.CliRunner().isolation(input="\n\n\n"):
            answers = prompt.prompt_for_config(context)

        assert dict(answers) == {'full_name': 'Your Name', 'check': 'yes'}

    def test_prompt_for_config_dict(self, monkeypatch):
        """Verify `prompt_for_config` call `read_user_dict` on dict request."""
        canned_answer = {"key": "value", "integer": 37}
        monkeypatch.setattr(
            'cookiecutter.prompt.read_user_dict',
            lambda var, default, prompts, prefix: canned_answer,
        )

        result = prompt.prompt_for_config({'cookiecutter': {'details': {}}})
        assert result == {'details': {'key': 'value', 'integer': 37}}

    def test_should_render_dict(self):
        """Verify template inside dictionary variable rendered."""
        template = '{{cookiecutter.project_name}}'
        context = {
            'cookiecutter': {
                'project_name': 'Slartibartfast',
                # Both the key and the value of the nested dict are templated.
                'details': {template: template},
            }
        }

        result = prompt.prompt_for_config(context, no_input=True)
        assert result == {
            'project_name': 'Slartibartfast',
            'details': {'Slartibartfast': 'Slartibartfast'},
        }

    def test_should_render_deep_dict(self):
        """Verify nested structures like dict in dict, rendered correctly.

        Templates are substituted at every nesting depth, and integer
        leaves come back as strings after passing the template engine.
        """
        context = {
            'cookiecutter': {
                'project_name': "Slartibartfast",
                'details': {
                    "key": "value",
                    "integer_key": 37,
                    "other_name": '{{cookiecutter.project_name}}',
                    "dict_key": {
                        "deep_key": "deep_value",
                        "deep_integer": 42,
                        "deep_other_name": '{{cookiecutter.project_name}}',
                        "deep_list": [
                            "deep value 1",
                            "{{cookiecutter.project_name}}",
                            "deep value 3",
                        ],
                    },
                    "list_key": [
                        "value 1",
                        "{{cookiecutter.project_name}}",
                        "value 3",
                    ],
                },
            }
        }

        cookiecutter_dict = prompt.prompt_for_config(context, no_input=True)
        # Note 37 -> "37" and 42 -> "42": rendering stringifies scalars.
        assert cookiecutter_dict == {
            'project_name': "Slartibartfast",
            'details': {
                "key": "value",
                "integer_key": "37",
                "other_name": "Slartibartfast",
                "dict_key": {
                    "deep_key": "deep_value",
                    "deep_integer": "42",
                    "deep_other_name": "Slartibartfast",
                    "deep_list": ["deep value 1", "Slartibartfast", "deep value 3"],
                },
                "list_key": ["value 1", "Slartibartfast", "value 3"],
            },
        }

    def test_should_render_deep_dict_with_human_prompts(self):
        """Verify dict rendered correctly when human-readable prompts."""
        context = {
            'cookiecutter': {
                'project_name': "Slartibartfast",
                'details': {
                    "key": "value",
                    "integer_key": 37,
                    "other_name": '{{cookiecutter.project_name}}',
                    "dict_key": {
                        "deep_key": "deep_value",
                    },
                },
                '__prompts__': {'project_name': 'Project name'},
            }
        }
### tests/test_read_user_variable.py
"""test_read_user_variable."""

import pytest

from cookiecutter.prompt import read_user_variable

# Variable name and default value shared by the tests below.
VARIABLE = 'project_name'
DEFAULT = 'Kivy Project'


@pytest.fixture
def mock_prompt(mocker):
    """Return a mocked version of the 'Prompt.ask' function."""
    patched_ask = mocker.patch('rich.prompt.Prompt.ask')
    return patched_ask


def test_click_invocation(mock_prompt):
    """Test click function called correctly by cookiecutter.

    Test for string type invocation.
    """
    mock_prompt.return_value = DEFAULT

    answer = read_user_variable(VARIABLE, DEFAULT)

    assert answer == DEFAULT
    mock_prompt.assert_called_once_with(VARIABLE, default=DEFAULT)


def test_input_loop_with_null_default_value(mock_prompt):
    """Test `Prompt.ask` is run repeatedly until a valid answer is provided.

    Test for `default_value` parameter equal to None.
    """
    # First answer is None (rejected); the second one is accepted.
    mock_prompt.side_effect = [None, DEFAULT]

    answer = read_user_variable(VARIABLE, None)

    assert answer == DEFAULT
    assert mock_prompt.call_count == 2
### tests/test_read_user_choice.py
"""Tests around prompting for and handling of choice variables."""

import pytest

from cookiecutter.prompt import read_user_choice

# Choice options and their 1-based index strings as shown to the user.
OPTIONS = ['hello', 'world', 'foo', 'bar']
OPTIONS_INDEX = ['1', '2', '3', '4']

# Exact rich-markup prompt text expected from read_user_choice.
EXPECTED_PROMPT = """Select varname
    [bold magenta]1[/] - [bold]hello[/]
    [bold magenta]2[/] - [bold]world[/]
    [bold magenta]3[/] - [bold]foo[/]
    [bold magenta]4[/] - [bold]bar[/]
    Choose from"""


@pytest.mark.parametrize('user_choice, expected_value', enumerate(OPTIONS, 1))
def test_click_invocation(mocker, user_choice, expected_value):
    """Test click function called correctly by cookiecutter.

    Test for choice type invocation.
    """
    ask_mock = mocker.patch('rich.prompt.Prompt.ask')
    # The user answers with the option's 1-based index.
    ask_mock.return_value = str(user_choice)

    assert read_user_choice('varname', OPTIONS) == expected_value

    ask_mock.assert_called_once_with(
        EXPECTED_PROMPT, choices=OPTIONS_INDEX, default='1'
    )


def test_raise_if_options_is_not_a_non_empty_list():
    """Test function called by cookiecutter raise expected errors.

    Test for choice type invocation.
    """
    # Non-list input and an empty list each raise their own error type.
    for bad_options, expected_error in (('NOT A LIST', TypeError), ([], ValueError)):
        with pytest.raises(expected_error):
            read_user_choice('foo', bad_options)
### tests/test_read_user_yes_no.py
"""test_read_user_yes_no."""

import pytest
from rich.prompt import InvalidResponse

from cookiecutter.prompt import YesNoPrompt, read_user_yes_no

# Prompt text and default answer shared by the tests below.
QUESTION = 'Is it okay to delete and re-clone it?'
DEFAULT = 'y'


def test_click_invocation(mocker):
    """Test click function called correctly by cookiecutter.

    Test for boolean type invocation.
    """
    ask_mock = mocker.patch('cookiecutter.prompt.YesNoPrompt.ask')
    ask_mock.return_value = DEFAULT

    answer = read_user_yes_no(QUESTION, DEFAULT)

    assert answer == DEFAULT
    ask_mock.assert_called_once_with(QUESTION, default=DEFAULT)


def test_yesno_prompt_process_response():
    """Test `YesNoPrompt` process_response to convert str to bool."""
    yes_no = YesNoPrompt()
    # Truthy/falsy shorthand answers convert to real booleans.
    assert yes_no.process_response('t') is True
    assert yes_no.process_response('f') is False
    # Anything unrecognized is rejected.
    with pytest.raises(InvalidResponse):
        yes_no.process_response('wrong')
### tests/test_read_user_dict.py
"""Test `process_json`, `read_user_dict` functions in `cookiecutter.prompt`."""

import click
import pytest
from rich.prompt import InvalidResponse

from cookiecutter.prompt import JsonPrompt, process_json, read_user_dict


def test_process_json_invalid_json():
    """Test `process_json` for correct error on malformed input."""
    with pytest.raises(InvalidResponse) as err:
        process_json('nope]')

    assert str(err.value) == 'Unable to decode to JSON.'


def test_process_json_non_dict():
    """Test `process_json` for correct error on non-dict JSON input."""
    # Valid JSON, but a list — only objects are accepted.
    with pytest.raises(InvalidResponse) as err:
        process_json('[1, 2]')

    assert str(err.value) == 'Requires JSON dict.'


def test_process_json_valid_json():
    """Test `process_json` for correct output on JSON input.

    Test for simple dict with list.
    """
    raw_input = '{"name": "foobar", "bla": ["a", 1, "b", false]}'
    expected = {'name': 'foobar', 'bla': ['a', 1, 'b', False]}

    assert process_json(raw_input) == expected


def test_process_json_deep_dict():
    """Test `process_json` for correct output on JSON input.

    Test for dict in dict case: nested objects, lists, and integer
    values must all survive the round trip untouched.
    """
    user_value = '''{
        "key": "value",
        "integer_key": 37,
        "dict_key": {
            "deep_key": "deep_value",
            "deep_integer": 42,
            "deep_list": [
                "deep value 1",
                "deep value 2",
                "deep value 3"
            ]
        },
        "list_key": [
            "value 1",
            "value 2",
            "value 3"
        ]
    }'''

    # Unlike prompt rendering, process_json keeps integers as integers.
    assert process_json(user_value) == {
        "key": "value",
        "integer_key": 37,
        "dict_key": {
            "deep_key": "deep_value",
            "deep_integer": 42,
            "deep_list": ["deep value 1", "deep value 2", "deep value 3"],
        },
        "list_key": ["value 1", "value 2", "value 3"],
    }


def test_should_raise_type_error(mocker):
    """Test `default_value` arg verification in `read_user_dict` function."""
    ask_mock = mocker.patch('cookiecutter.prompt.JsonPrompt.ask')

    # A non-dict default must be rejected before any prompting happens.
    with pytest.raises(TypeError):
        read_user_dict('name', 'russell')

    ask_mock.assert_not_called()


def test_should_call_prompt_with_process_json(mocker):
    """Test to make sure that `process_json` is actuall<response clipped><NOTE>Due to the max output limit, only part of the full response has been shown to you.</NOTE>.return_value = {
        'cookiecutter': {}
    }
    mocker.patch('cookiecutter.main.generate_files')
    mocker.patch('cookiecutter.main.dump')

    cookiecutter(
        '.',
        replay=True,
        config_file=user_config_file,
    )

    mock_replay_load.assert_called_once_with(
        user_config_data['replay_dir'],
        'fake-repo-tmpl',
    )


def test_custom_replay_file(monkeypatch, mocker, user_config_file):
    """Check that reply.load is called with the custom replay_file."""
    monkeypatch.chdir('tests/fake-repo-tmpl')

    load_mock = mocker.patch('cookiecutter.main.load')
    # Short-circuit everything after context generation.
    context_mock = mocker.patch('cookiecutter.main.generate_context')
    context_mock.return_value = {'cookiecutter': {}}
    mocker.patch('cookiecutter.main.generate_files')
    mocker.patch('cookiecutter.main.dump')

    cookiecutter('.', replay='./custom-replay-file', config_file=user_config_file)

    # The replay path is split into directory and file name.
    load_mock.assert_called_once_with('.', 'custom-replay-file')
### tests/test_cli.py
"""Collection of tests around cookiecutter's command-line interface."""

import json
import os
import re
from pathlib import Path

import pytest
from click.testing import CliRunner

from cookiecutter import utils
from cookiecutter.__main__ import main
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UnknownExtension
from cookiecutter.main import cookiecutter


@pytest.fixture(scope='session')
def cli_runner():
    """Fixture that returns a helper function to run the cookiecutter cli."""
    runner = CliRunner()

    def invoke_cli(*cli_args, **cli_kwargs):
        """Invoke the cookiecutter CLI with the given arguments."""
        return runner.invoke(main, cli_args, **cli_kwargs)

    return invoke_cli


@pytest.fixture
def remove_fake_project_dir(request):
    """Remove the fake project directory created during the tests."""

    def cleanup():
        # Both the plain and the 'input'-prefixed variants may exist.
        for candidate in ('fake-project', 'inputfake-project'):
            if os.path.isdir(candidate):
                utils.rmtree(candidate)

    request.addfinalizer(cleanup)


@pytest.fixture
def remove_tmp_dir(request):
    """Remove the ``tests/tmp`` directory before and after the test.

    The docstring previously claimed this removed the fake project dir
    (copy-paste from ``remove_fake_project_dir``); it actually manages
    ``tests/tmp``. Pre-test removal guards against leftovers from an
    earlier interrupted run; the finalizer cleans up this run's output.
    """
    if os.path.isdir('tests/tmp'):
        utils.rmtree('tests/tmp')

    def fin_remove_tmp_dir():
        if os.path.isdir('tests/tmp'):
            utils.rmtree('tests/tmp')

    request.addfinalizer(fin_remove_tmp_dir)


@pytest.fixture
def make_fake_project_dir():
    """Create a fake project directory to be overwritten in the tests.

    Cleanup is handled by the ``remove_fake_project_dir`` fixture.
    The previously-declared ``request`` parameter was unused and has
    been dropped.
    """
    os.makedirs('fake-project')


@pytest.fixture(params=['-V', '--version'])
def version_cli_flag(request):
    """Pytest fixture return both version invocation options."""
    flag = request.param
    return flag


def test_cli_version(cli_runner, version_cli_flag):
    """Verify Cookiecutter version output by `cookiecutter` on cli invocation."""
    outcome = cli_runner(version_cli_flag)
    assert outcome.exit_code == 0
    assert outcome.output.startswith('Cookiecutter')


@pytest.mark.usefixtures('make_fake_project_dir', 'remove_fake_project_dir')
def test_cli_error_on_existing_output_directory(cli_runner):
    """Test cli invocation without `overwrite-if-exists` fail if dir exist."""
    outcome = cli_runner('tests/fake-repo-pre/', '--no-input')

    assert outcome.exit_code != 0
    assert outcome.output == 'Error: "fake-project" directory already exists\n'


@pytest.mark.usefixtures('remove_fake_project_dir')
def test_cli(cli_runner):
    """Test cli invocation work without flags if directory not exist."""
    outcome = cli_runner('tests/fake-repo-pre/', '--no-input')

    assert outcome.exit_code == 0
    assert os.path.isdir('fake-project')
    readme = Path('fake-project', 'README.rst').read_text()
    assert 'Project name: **Fake Project**' in readme


@pytest.mark.usefixtures('remove_fake_project_dir')
def test_cli_verbose(cli_runner):
    """Test cli invocation display log if called with `verbose` flag."""
    outcome = cli_runner('tests/fake-repo-pre/', '--no-input', '-v')

    assert outcome.exit_code == 0
    assert os.path.isdir('fake-project')
    readme = Path('fake-project', 'README.rst').read_text()
    assert 'Project name: **Fake Project**' in readme


@pytest.mark.usefixtures('remove_fake_project_dir')
def test_cli_replay(mocker, cli_runner):
    """Test cli invocation display log with `verbose` and `replay` flags."""
    mock_cookiecutter = mocker.patch('cookiecutter.cli.cookiecutter')

    template_path = 'tests/fake-repo-pre/'
    result = cli_runner(template_path, '--replay', '-v')

    assert result.exit_code == 0
    expected_kwargs = dict(
        replay=True,
        overwrite_if_exists=False,
        skip_if_file_exists=False,
        output_dir='.',
        config_file=None,
        default_config=False,
        extra_context=None,
        password=None,
        directory=None,
        accept_hooks=True,
        keep_project_on_failure=False,
    )
    mock_cookiecutter.assert_called_once_with(
        template_path, None, False, **expected_kwargs
    )


@pytest.mark.usefixtures('remove_fake_project_dir')
def test_cli_replay_file(mocker, cli_runner):
    """Test cli invocation correctly pass --replay-file option."""
    mock_cookiecutter = mocker.patch('cookiecutter.cli.cookiecutter')

    template_path = 'tests/fake-repo-pre/'
    result = cli_runner(template_path, '--replay-file', '~/custom-replay-file', '-v')

    assert result.exit_code == 0
    expected_kwargs = dict(
        replay='~/custom-replay-file',
        overwrite_if_exists=False,
        skip_if_file_exists=False,
        output_dir='.',
        config_file=None,
        default_config=False,
        extra_context=None,
        password=None,
        directory=None,
        accept_hooks=True,
        keep_project_on_failure=False,
    )
    mock_cookiecutter.assert_called_once_with(
        template_path, None, False, **expected_kwargs
    )


@pytest.mark.usefixtures('remove_tmp_dir')
def test_cli_replay_generated(cli_runner):
    """Test cli invocation correctly generates a project with replay.

    Fixes: the original asserted on a bare ``open(...).read()``, leaking
    the file handle; ``Path.read_text`` opens and closes it. The unused
    ``mocker`` fixture parameter has also been dropped.
    """
    result = cli_runner(
        'tests/fake-repo-replay/',
        '--replay-file',
        'tests/test-replay/valid_replay.json',
        '-o',
        'tests/tmp/',
        '-v',
    )

    assert result.exit_code == 0
    readme = Path('tests/tmp/replay-project/README.md').read_text()
    assert readme.strip() == 'replayed'


@pytest.mark.usefixtures('remove_fake_project_dir')
def test_cli_exit_on_noinput_and_replay(mocker, cli_runner):
    """Test cli invocation fail if both `no-input` and `replay` flags passed."""
    # side_effect forwards to the real implementation so the error surfaces.
    mock_cookiecutter = mocker.patch(
        'cookiecutter.cli.cookiecutter', side_effect=cookiecutter
    )

    template_path = 'tests/fake-repo-pre/'
    result = cli_runner(template_path, '--no-input', '--replay', '-v')

    assert result.exit_code == 1
    assert (
        "You can not use both replay and no_input or extra_context at the same time."
        in result.output
    )

    expected_kwargs = dict(
        replay=True,
        overwrite_if_exists=False,
        skip_if_file_exists=False,
        output_dir='.',
        config_file=None,
        default_config=False,
        extra_context=None,
        password=None,
        directory=None,
        accept_hooks=True,
        keep_project_on_failure=False,
    )
    mock_cookiecutter.assert_called_once_with(
        template_path, None, True, **expected_kwargs
    )


@pytest.fixture(params=['-f', '--overwrite-if-exists'])
def overwrite_cli_flag(request):
    """Pytest fixture return all `overwrite-if-exists` invocation options."""
    flag = request.param
    return flag


@pytest.mark.usefixtures('remove_fake_project_dir')
def test_run_cookiecutter_on_overwrite_if_exists_and_replay(
    mocker, cli_runner, overwrite_cli_flag
):
    """Test cli invocation with `overwrite-if-exists` and `replay` flags."""
    mock_cookiecutter = mocker.patch('cookiecutter.cli.cookiecutter')

    template_path = 'tests/fake-repo-pre/'
    result = cli_runner(template_path, '--replay', '-v', overwrite_cli_flag)

    assert result.exit_code == 0
    expected_kwargs = dict(
        replay=True,
        overwrite_if_exists=True,
        skip_if_file_exists=False,
        output_dir='.',
        config_file=None,
        default_config=False,
        extra_context=None,
        password=None,
        directory=None,
        accept_hooks=True,
        keep_project_on_failure=False,
    )
    mock_cookiecutter.assert_called_once_with(
        template_path, None, False, **expected_kwargs
    )


@pytest.mark.usefixtures('remove_fake_project_dir')
def test_cli_overwrite_if_exists_when_output_dir_does_not_exist(
    cli_runner, overwrite_cli_flag
):
    """Test cli invocation with `overwrite-if-exists` and `no-input` flags.

    Case when output dir not exist.
    """
    outcome = cli_runner('tests/fake-repo-pre/', '--no-input', overwrite_cli_flag)

    assert outcome.exit_code == 0
    assert os.path.isdir('fake-project')


@pytest.mark.usefixtures('make_fake_project_dir', 'remove_fake_project_dir')
### tests/test_get_user_config.py
"""Tests to verify correct work with user configs and system/user variables inside."""

import os
import shutil

import pytest

from cookiecutter import config
from cookiecutter.exceptions import InvalidConfiguration


@pytest.fixture(scope='module')
def user_config_path():
    """Fixture. Return user config path for current user."""
    rc_path = os.path.expanduser('~/.cookiecutterrc')
    return rc_path


@pytest.fixture(scope='function')
def back_up_rc(user_config_path):
    """
    Back up an existing cookiecutter rc and restore it after the test.

    If ~/.cookiecutterrc is pre-existing, move it to a temp location.
    Uses ``shutil.move`` instead of the previous copy-then-remove pair,
    which is simpler and preserves file metadata.
    """
    user_config_path_backup = os.path.expanduser('~/.cookiecutterrc.backup')

    if os.path.exists(user_config_path):
        shutil.move(user_config_path, user_config_path_backup)

    yield
    # Remove the ~/.cookiecutterrc that has been created in the test.
    if os.path.exists(user_config_path):
        os.remove(user_config_path)

    # If it existed, restore the original ~/.cookiecutterrc.
    if os.path.exists(user_config_path_backup):
        shutil.move(user_config_path_backup, user_config_path)


@pytest.fixture
def custom_config():
    """Fixture. Return expected custom configuration for future tests validation.

    Mirrors the contents of ``tests/test-config/valid-config.yaml``.
    """
    return {
        'default_context': {
            'full_name': 'Firstname Lastname',
            'email': 'firstname.lastname@gmail.com',
            'github_username': 'example',
            # Nested context values are supported too.
            'project': {
                'description': 'description',
                'tags': [
                    'first',
                    'second',
                    'third',
                ],
            },
        },
        'cookiecutters_dir': '/home/example/some-path-to-templates',
        'replay_dir': '/home/example/some-path-to-replay-files',
        # Abbreviation templates expand '{0}' with the repo slug.
        'abbreviations': {
            'gh': 'https://github.com/{0}.git',
            'gl': 'https://gitlab.com/{0}.git',
            'bb': 'https://bitbucket.org/{0}',
            'helloworld': 'https://github.com/hackebrot/helloworld',
        },
    }


@pytest.mark.usefixtures('back_up_rc')
def test_get_user_config_valid(user_config_path, custom_config):
    """Validate user config correctly parsed if exist and correctly formatted."""
    # Install a known-good config as the user's rc file.
    shutil.copy('tests/test-config/valid-config.yaml', user_config_path)

    assert config.get_user_config() == custom_config


@pytest.mark.usefixtures('back_up_rc')
def test_get_user_config_invalid(user_config_path):
    """Validate `InvalidConfiguration` raised when provided user config malformed."""
    broken_config = 'tests/test-config/invalid-config.yaml'
    shutil.copy(broken_config, user_config_path)

    with pytest.raises(InvalidConfiguration):
        config.get_user_config()


@pytest.mark.usefixtures('back_up_rc')
def test_get_user_config_nonexistent():
    """Validate default app config returned, if user does not have own config."""
    conf = config.get_user_config()
    assert conf == config.DEFAULT_CONFIG


@pytest.fixture
def custom_config_path():
    """Fixture. Return path to custom user config for tests."""
    path = 'tests/test-config/valid-config.yaml'
    return path


def test_specify_config_path(mocker, custom_config_path, custom_config):
    """Validate provided custom config path should be respected and parsed."""
    get_config_spy = mocker.spy(config, 'get_config')

    parsed_config = config.get_user_config(custom_config_path)

    get_config_spy.assert_called_once_with(custom_config_path)
    assert parsed_config == custom_config


def test_default_config_path(user_config_path):
    """Validate app configuration. User config path should match default path."""
    assert user_config_path == config.USER_CONFIG_PATH


def test_default_config_from_env_variable(
    monkeypatch, custom_config_path, custom_config
):
    """Validate app configuration. User config path should be parsed from sys env."""
    # COOKIECUTTER_CONFIG points the loader at the custom file.
    monkeypatch.setenv('COOKIECUTTER_CONFIG', custom_config_path)

    assert config.get_user_config() == custom_config


def test_force_default_config(mocker, custom_config_path):
    """Validate `default_config=True` should ignore provided custom user config."""
    get_config_spy = mocker.spy(config, 'get_config')

    result = config.get_user_config(custom_config_path, default_config=True)

    assert result == config.DEFAULT_CONFIG
    get_config_spy.assert_not_called()


def test_expand_user_for_directories_in_config(monkeypatch):
    """Validate user pointers expanded in user configs."""
    # Make '~' deterministic regardless of the real home directory.
    monkeypatch.setattr(
        'os.path.expanduser', lambda path: path.replace('~', 'Users/bob')
    )

    user_config = config.get_user_config('tests/test-config/config-expand-user.yaml')

    assert user_config['replay_dir'] == 'Users/bob/replay-files'
    assert user_config['cookiecutters_dir'] == 'Users/bob/templates'


def test_expand_vars_for_directories_in_config(monkeypatch):
    """Validate environment variables expanded in user configs."""
    monkeypatch.setenv('COOKIES', 'Users/bob/cookies')

    user_config = config.get_user_config('tests/test-config/config-expand-vars.yaml')

    assert user_config['cookiecutters_dir'] == 'Users/bob/cookies/templates'
    assert user_config['replay_dir'] == 'Users/bob/cookies/replay-files'


def test_specify_config_values():
    """Validate provided custom config values should be respected."""
    custom_replay_dir = 'Users/bob/cookies/custom-replay-dir'

    user_config = config.get_user_config(
        default_config={'replay_dir': custom_replay_dir}
    )

    expected = {**config.DEFAULT_CONFIG, 'replay_dir': custom_replay_dir}
    assert user_config == expected
[The command completed with exit code 0.]
[Current working directory: /workspace/cookiecutter]
[Python interpreter: /usr/bin/python]
[Command finished with exit code 0]