nolar / kopf

A Python framework to write Kubernetes operators in just a few lines of code

Home Page: https://kopf.readthedocs.io/

Geek Repo:Geek Repo

Github PK Tool:Github PK Tool

RuntimeError: Timeout context manager should be used inside a task

jkupferer opened this issue · comments

Long story short

When using kubernetes_asyncio within a handler with version 1.36.1 I get this error from aiohttp:

RuntimeError: Timeout context manager should be used inside a task

The same code running with 1.36.0 does not have this issue.

Kopf version

1.36.1

Kubernetes version

1.25.9

Python version

3.11

Code

# https://github.com/redhat-cop/containers-quickstarts/blob/master/build-s2i-python-kopf/examples/kopf-simple/operator/operator.py

#!/usr/bin/env python

import kopf
import kubernetes_asyncio
import os
import random
import string
import yaml

operator_domain = os.environ.get('OPERATOR_DOMAIN', 'kopf-simple.example.com')
config_map_label = operator_domain + '/config'

if os.path.exists('/var/run/secrets/kubernetes.io/serviceaccount'):
    kubernetes_asyncio.config.load_incluster_config()
else:
    kubernetes_asyncio.config.load_kube_config()

core_v1_api = kubernetes_asyncio.client.CoreV1Api()

def random_string(length=8, character_set=''):
    '''
    Return a random string of specified length and character set.
    '''
    if character_set == '':
        character_set = string.ascii_lowercase + string.digits
    return ''.join(random.choice(character_set) for i in range(length))

def owner_reference_from_resource(resource):
    return kubernetes_asyncio.client.V1OwnerReference(
        api_version = resource['apiVersion'],
        controller = True,
        block_owner_deletion = False,
        kind = resource['kind'],
        name = resource['metadata']['name'],
        uid = resource['metadata']['uid']
    )

def load_config_map(config_map):
    metadata = config_map['metadata']
    name = metadata['name']
    if not 'data' in config_map \
    or 'config' not in config_map['data']:
        raise kopf.PermanentError('Config map must include config data')
    try:
        config = yaml.safe_load(config_map['data']['config'])
    except yaml.parser.ParserError as e:
        raise kopf.PermanentError('Unable to load config YAML: {0}'.format(str(e)))
    if not 'secretNames' in config:
        raise kopf.PermanentError('Config data must include secretNames')
    if not isinstance(config['secretNames'], list):
        raise kopf.PermanentError('Config data secretNames must be a list')
    return config

async def get_secret(name, namespace):
    '''
    Read namespaced secret, return None if not found.
    '''
    try:
        return await core_v1_api.read_namespaced_secret(name, namespace)
    except kubernetes_asyncio.client.rest.ApiException as e:
        if e.status == 404:
            return None
        raise

async def update_config_map_status(name, namespace, config_map, secret):
    '''
    Update status into ConfigMap data
    '''
    await core_v1_api.patch_namespaced_config_map(name, namespace, dict(
        data = dict(
            status = yaml.safe_dump(dict(
                secret = dict(
                    apiVersion = secret.api_version,
                    kind = secret.kind,
                    name = secret.metadata.name,
                    namespace = secret.metadata.namespace,
                    resourceVersion = secret.metadata.resource_version,
                    uid = secret.metadata.uid
                )
            ), default_flow_style=False)
        )
    ))

async def create_secret(config, name, namespace, owner_reference, logger):
    secret_data = dict()

    for secret_name in config['secretNames']:
        secret_data[secret_name] = random_string()

    secret = await core_v1_api.create_namespaced_secret(
        namespace,
        kubernetes_asyncio.client.V1Secret(
            metadata = kubernetes_asyncio.client.V1ObjectMeta(
                name = name,
                owner_references = [owner_reference]
            ),
            string_data = secret_data
        )
    )
    logger.info('Created secret %s', secret.metadata.name)
    return secret

async def manage_secret_values(config, secret, logger):
    '''
    Add any required values to secret.
    '''
    new_secret_data = dict()
    for secret_name in config['secretNames']:
        if secret_name not in secret.data:
           new_secret_data[secret_name] = random_string()
    if new_secret_data:
        secret.string_data = new_secret_data
        secret = await core_v1_api.replace_namespaced_secret(secret.metadata.name, secret.metadata.namespace, secret)
        logger.info('Updated secret %s', secret.metadata.name)
    else:
        logger.debug('No change for secret %s', secret.metadata.name)
    return secret

async def manage_secret_for_config_map(name, namespace, config_map, logger):
    '''
    Create secrets based on config_map.
    '''
    config = load_config_map(config_map)
    owner_reference = owner_reference_from_resource(config_map)
    secret = await get_secret(name, namespace)
    if secret:
        if not secret.metadata.owner_references \
        or not secret.metadata.owner_references[0] == owner_reference:
            raise kopf.TemporaryError('Unable to manage secret, not the owner!')
        secret = await manage_secret_values(config, secret, logger)
    else:
        secret = await create_secret(config, name, namespace, owner_reference, logger)
    await update_config_map_status(name, namespace, config_map, secret)

@kopf.on.startup()
def configure(settings: kopf.OperatorSettings, **_):
    # Disable scanning for Namespaces and CustomResourceDefinitions
    settings.scanning.disabled = True

@kopf.on.create('', 'v1', 'configmaps', labels={config_map_label: kopf.PRESENT})
async def on_create_config_map(body, name, namespace, logger, **_):
    logger.info("New app ConfigMap '%s'", name)
    await manage_secret_for_config_map(name, namespace, body, logger)

@kopf.on.update('', 'v1', 'configmaps', labels={config_map_label: kopf.PRESENT})
async def on_create_config_map(body, name, namespace, logger, **_):
    logger.info("New app ConfigMap '%s'", name)
    await manage_secret_for_config_map(name, namespace, body, logger)

Logs

[2023-05-10 15:31:32,856] kopf.activities.star [INFO    ] Activity 'configure' succeeded.
[2023-05-10 15:31:32,857] kopf._core.engines.a [INFO    ] Initial authentication has been initiated.
[2023-05-10 15:31:32,859] kopf.activities.auth [INFO    ] Activity 'login_via_pykube' succeeded.
[2023-05-10 15:31:32,860] kopf.activities.auth [INFO    ] Activity 'login_via_client' succeeded.
[2023-05-10 15:31:32,860] kopf._core.engines.a [INFO    ] Initial authentication has finished.
[2023-05-10 15:32:23,878] kopf.objects         [INFO    ] [python-kopf/kopf-simple-mrlfc] New app ConfigMap 'kopf-simple-mrlfc'
[2023-05-10 15:32:23,879] kopf.objects         [ERROR   ] [python-kopf/kopf-simple-mrlfc] Handler 'on_create_config_map' failed with an exception. Will retry.
Traceback (most recent call last):
  File "/opt/app-root/lib64/python3.11/site-packages/kopf/_core/actions/execution.py", line 276, in execute_handler_once
    result = await invoke_handler(
             ^^^^^^^^^^^^^^^^^^^^^
  File "/opt/app-root/lib64/python3.11/site-packages/kopf/_core/actions/execution.py", line 371, in invoke_handler
    result = await invocation.invoke(
             ^^^^^^^^^^^^^^^^^^^^^^^^
  File "/opt/app-root/lib64/python3.11/site-packages/kopf/_core/actions/invocation.py", line 116, in invoke
    result = await fn(**kwargs)  # type: ignore
             ^^^^^^^^^^^^^^^^^^
  File "/opt/app-root/operator/operator.py", line 143, in on_create_config_map
    await manage_secret_for_config_map(name, namespace, body, logger)
  File "/opt/app-root/operator/operator.py", line 125, in manage_secret_for_config_map
    secret = await get_secret(name, namespace)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/opt/app-root/operator/operator.py", line 59, in get_secret
    return await core_v1_api.read_namespaced_secret(name, namespace)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/opt/app-root/lib64/python3.11/site-packages/kubernetes_asyncio/client/api_client.py", line 185, in __call_api
    response_data = await self.request(
                    ^^^^^^^^^^^^^^^^^^^
  File "/opt/app-root/lib64/python3.11/site-packages/kubernetes_asyncio/client/rest.py", line 193, in GET
    return (await self.request("GET", url,
            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/opt/app-root/lib64/python3.11/site-packages/kubernetes_asyncio/client/rest.py", line 177, in request
    r = await self.pool_manager.request(**args)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/opt/app-root/lib64/python3.11/site-packages/aiohttp/client.py", line 467, in _request
    with timer:
  File "/opt/app-root/lib64/python3.11/site-packages/aiohttp/helpers.py", line 701, in __enter__
    raise RuntimeError(
RuntimeError: Timeout context manager should be used inside a task
[2023-05-10 15:32:23,904] kopf.objects         [WARNING ] [python-kopf/kopf-simple-mrlfc] Patching failed with inconsistencies: (('remove', ('status',), {'kopf': {'progress': {'on_create_config_map': {'started': '2023-05-10T15:32:23.878239', 'stopped': None, 'delayed': '2023-05-10T15:33:23.882168', 'purpose': 'create', 'retries': 1, 'success': False, 'failure': False, 'message': 'Timeout context manager should be used inside a task', 'subrefs': None}}}}, None),)

Additional information

Same error occurs with Python 3.9.

Perhaps related to #998 ?

It seems perhaps this could be addressed by wrapping code in asyncio.create_task() like:

task = asyncio.create_task(
  manage_secret_for_config_map(name, namespace, body, logger)
)
await task

But, this change feels like a breaking change between kopf 1.36.0 and 1.36.1? It doesn't feel like it is the sort of change that should be required for a semver patch level change in the kopf library?

I found a solution for this issue. For some reason, the kubernetes_asyncio library now needs this line:

core_v1_api = kubernetes_asyncio.client.CoreV1Api()

to run within the context of an async function. So I moved it into @kopf.on.startup and it now works as expected. I still can't figure out why this behavior changed between kopf 1.36.0 and 1.36.1, but this is probably a better way to initialize the API anyway.