StackStorm 22. Source code analysis: the StackStorm sensor service

1 Main entry point
##################################################
Source file: st2/st2reactor/bin/st2sensorcontainer
import sys

from st2reactor.cmd import sensormanager

if __name__ == '__main__':
    sys.exit(sensormanager.main())

2 Next call

#########################################

Source file: st2/st2reactor/st2reactor/cmd/sensormanager.py

import os
import sys

from st2common import log as logging
from st2common.logging.misc import get_logger_name_for_module
from st2common.service_setup import setup as common_setup
from st2common.service_setup import teardown as common_teardown
from st2common.util.monkey_patch import monkey_patch
from st2common.exceptions.sensors import SensorNotFoundException
from st2common.constants.exit_codes import FAILURE_EXIT_CODE
from st2reactor.sensor import config
from st2reactor.container.manager import SensorContainerManager
from st2reactor.container.partitioner_lookup import get_sensors_partitioner

__all__ = [
    'main'
]

monkey_patch()

LOGGER_NAME = get_logger_name_for_module(sys.modules[__name__])
LOG = logging.getLogger(LOGGER_NAME)


def _setup():
    common_setup(service='sensorcontainer', config=config, setup_db=True,
                 register_mq_exchanges=True, register_signal_handlers=True)


def _teardown():
    common_teardown()


def main():
    try:
        _setup()
        sensors_partitioner = get_sensors_partitioner()
        container_manager = SensorContainerManager(sensors_partitioner=sensors_partitioner)
        return container_manager.run_sensors()
    except SystemExit as exit_code:
        return exit_code
    except SensorNotFoundException as e:
        LOG.exception(e)
        return 1
    except:
        LOG.exception('(PID:%s) SensorContainer quit due to exception.', os.getpid())
        return FAILURE_EXIT_CODE
    finally:
        _teardown()

3 Next call

##################################
Source file: st2/st2common/st2common/service_setup.py
from __future__ import absolute_import

import os
import traceback

from oslo_config import cfg

from st2common import log as logging
from st2common.constants.logging import DEFAULT_LOGGING_CONF_PATH
from st2common.transport.bootstrap_utils import register_exchanges_with_retry
from st2common.signal_handlers import register_common_signal_handlers
from st2common.util.debugging import enable_debugging
from st2common.models.utils.profiling import enable_profiling
from st2common import triggers
from st2common.rbac.migrations import run_all as run_all_rbac_migrations

# Note: This is here for backward compatibility.
# Function has been moved to a standalone module to avoid expensive indirect
# import costs
from st2common.database_setup import db_setup
from st2common.database_setup import db_teardown


__all__ = [
    'setup',
    'teardown',

    'db_setup',
    'db_teardown'
]

LOG = logging.getLogger(__name__)


def setup(service, config, setup_db=True, register_mq_exchanges=True,
          register_signal_handlers=True, register_internal_trigger_types=False,
          run_migrations=True, config_args=None):
    """
    Common setup function.

    Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Set log level for all the loggers to DEBUG if --debug flag is present or
       if system.debug config option is set to True.
    4. Registers RabbitMQ exchanges
    5. Registers common signal handlers
    6. Register internal trigger types

    :param service: Name of the service.
    :param config: Config object to use to parse args.
    """
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH, excludes=None)

    # Parse args to setup config.
    if config_args:
        config.parse_args(config_args)
    else:
        config.parse_args()

    config_file_paths = cfg.CONF.config_file
    config_file_paths = [os.path.abspath(path) for path in config_file_paths]
    LOG.debug('Using config files: %s', ','.join(config_file_paths))

    # Setup logging.
    logging_config_path = config.get_logging_config_path()
    logging_config_path = os.path.abspath(logging_config_path)

    LOG.debug('Using logging config: %s', logging_config_path)

    try:
        logging.setup(logging_config_path, redirect_stderr=cfg.CONF.log.redirect_stderr,
                      excludes=cfg.CONF.log.excludes)
    except KeyError as e:
        tb_msg = traceback.format_exc()
        if 'log.setLevel' in tb_msg:
            msg = 'Invalid log level selected. Log level names need to be all uppercase.'
            msg += '\n\n' + getattr(e, 'message', str(e))
            raise KeyError(msg)
        else:
            raise e

    if cfg.CONF.debug or cfg.CONF.system.debug:
        enable_debugging()

    if cfg.CONF.profile:
        enable_profiling()

    # All other setup which requires config to be parsed and logging to
    # be correctly setup.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges_with_retry()

    if register_signal_handlers:
        register_common_signal_handlers()

    if register_internal_trigger_types:
        triggers.register_internal_trigger_types()

    # TODO: This is a "not so nice" workaround until we have a proper migration system in place
    if run_migrations:
        run_all_rbac_migrations()


def teardown():
    """
    Common teardown function.
    """
    db_teardown()
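
For orientation, here is a minimal sketch (not st2 code) of how a hypothetical service would drive this setup/teardown pair. The service name 'myservice' is made up, and the sensor config module is reused purely as an example of a module exposing parse_args() and get_logging_config_path():

from st2common.service_setup import setup as common_setup
from st2common.service_setup import teardown as common_teardown
from st2reactor.sensor import config  # example config module


def main():
    # Parse config, connect to the DB, register RabbitMQ exchanges and
    # signal handlers -- the same call sensormanager._setup() makes above
    common_setup(service='myservice', config=config, setup_db=True,
                 register_mq_exchanges=True, register_signal_handlers=True)
    try:
        pass  # the service main loop would go here
    finally:
        common_teardown()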

4 Next call
########################################
Source file: st2/st2reactor/st2reactor/container/partitioner_lookup.py
import copy
from oslo_config import cfg

from st2common import log as logging
from st2common.constants.sensors import DEFAULT_PARTITION_LOADER, KVSTORE_PARTITION_LOADER, \
    FILE_PARTITION_LOADER, HASH_PARTITION_LOADER
from st2common.exceptions.sensors import SensorPartitionerNotSupportedException
from st2reactor.container.partitioners import DefaultPartitioner, KVStorePartitioner, \
    FileBasedPartitioner, SingleSensorPartitioner
from st2reactor.container.hash_partitioner import HashPartitioner

__all__ = [
    'get_sensors_partitioner'
]

LOG = logging.getLogger(__name__)

PROVIDERS = {
    DEFAULT_PARTITION_LOADER: DefaultPartitioner,
    KVSTORE_PARTITION_LOADER: KVStorePartitioner,
    FILE_PARTITION_LOADER: FileBasedPartitioner,
    HASH_PARTITION_LOADER: HashPartitioner
}


def get_sensors_partitioner():
    if cfg.CONF.sensor_ref:
        return SingleSensorPartitioner(sensor_ref=cfg.CONF.sensor_ref)
    partition_provider_config = copy.copy(cfg.CONF.sensorcontainer.partition_provider)
    partition_provider = partition_provider_config.pop('name')
    sensor_node_name = cfg.CONF.sensorcontainer.sensor_node_name

    provider = PROVIDERS.get(partition_provider.lower(), None)
    LOG.info('Using partitioner %s with sensornode %s.', partition_provider, sensor_node_name)
    if not provider:
        raise SensorPartitionerNotSupportedException(
            'Partition provider %s not found.' % partition_provider)

    # pass in extra config with no analysis
    return provider(sensor_node_name=sensor_node_name, **partition_provider_config)
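
The partition_provider dict comes from st2.conf. A hedged example of what such a configuration might look like (oslo.config DictOpt syntax, comma-separated 'key:value' pairs; exact option names should be verified against the st2 documentation):

[sensorcontainer]
sensor_node_name = sensornode1
# 'name' picks the provider from PROVIDERS; the remaining keys are passed
# through to the partitioner constructor as keyword arguments
partition_provider = name:file,partition_file:/etc/st2/sensor_partition.yaml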

5 Next call
#######################################
Source file: st2/st2common/st2common/constants/sensors.py
# Minimum poll interval for a sensor
MINIMUM_POLL_INTERVAL = 4

# keys for PARTITION loaders
DEFAULT_PARTITION_LOADER = 'default'
KVSTORE_PARTITION_LOADER = 'kvstore'
FILE_PARTITION_LOADER = 'file'
HASH_PARTITION_LOADER = 'hash'

6 Next call
################################################
Source file: st2/st2reactor/st2reactor/container/partitioners.py
import sets
import yaml

from st2common import log as logging
from st2common.exceptions.sensors import SensorNotFoundException, \
    SensorPartitionMapMissingException
from st2common.persistence.keyvalue import KeyValuePair
from st2common.persistence.sensor import SensorType


__all__ = [
    'get_all_enabled_sensors',
    'DefaultPartitioner',
    'KVStorePartitioner',
    'FileBasedPartitioner',
    'SingleSensorPartitioner'
]

LOG = logging.getLogger(__name__)


def get_all_enabled_sensors():
    # only query for enabled sensors.
    sensors = SensorType.query(enabled=True)
    LOG.info('Found %d registered sensors in db scan.', len(sensors))
    return sensors


class DefaultPartitioner(object):

    def __init__(self, sensor_node_name):
        self.sensor_node_name = sensor_node_name

    def is_sensor_owner(self, sensor_db):
        """
        All sensors are supported
        """
        # No enabled check here as this could also be due to a delete or update
        return sensor_db is not None

    def get_sensors(self):
        all_enabled_sensors = get_all_enabled_sensors()

        sensor_refs = self.get_required_sensor_refs()

        # None has special meaning and is different from empty array.
        if sensor_refs is None:
            return all_enabled_sensors

        partition_members = []

        for sensor in all_enabled_sensors:
            # pylint: disable=unsupported-membership-test
            sensor_ref = sensor.get_reference()
            if sensor_ref.ref in sensor_refs:
                partition_members.append(sensor)

        return partition_members

    def get_required_sensor_refs(self):
        return None


class KVStorePartitioner(DefaultPartitioner):

    def __init__(self, sensor_node_name):
        super(KVStorePartitioner, self).__init__(sensor_node_name=sensor_node_name)
        self._supported_sensor_refs = None

    def is_sensor_owner(self, sensor_db):
        return sensor_db.get_reference().ref in self._supported_sensor_refs

    def get_required_sensor_refs(self):
        partition_lookup_key = self._get_partition_lookup_key(self.sensor_node_name)

        kvp = KeyValuePair.get_by_name(partition_lookup_key)
        sensor_refs_str = kvp.value if kvp.value else ''
        self._supported_sensor_refs = sets.Set([
            sensor_ref.strip() for sensor_ref in sensor_refs_str.split(',')])
        return self._supported_sensor_refs

    def _get_partition_lookup_key(self, sensor_node_name):
        return '{}.sensor_partition'.format(sensor_node_name)


class FileBasedPartitioner(DefaultPartitioner):

    def __init__(self, sensor_node_name, partition_file):
        super(FileBasedPartitioner, self).__init__(sensor_node_name=sensor_node_name)
        self.partition_file = partition_file
        self._supported_sensor_refs = None

    def is_sensor_owner(self, sensor_db):
        return sensor_db.get_reference().ref in self._supported_sensor_refs and sensor_db.enabled

    def get_required_sensor_refs(self):
        with open(self.partition_file, 'r') as f:
            partition_map = yaml.safe_load(f)
            sensor_refs = partition_map.get(self.sensor_node_name, None)
            if sensor_refs is None:
                raise SensorPartitionMapMissingException('Sensor partition not found for %s in %s.',
                                                         self.sensor_node_name, self.partition_file)
            self._supported_sensor_refs = sets.Set(sensor_refs)
            return self._supported_sensor_refs


class SingleSensorPartitioner(object):

    def __init__(self, sensor_ref):
        self._sensor_ref = sensor_ref

    def get_sensors(self):
        sensor = SensorType.get_by_ref(self._sensor_ref)
        if not sensor:
            raise SensorNotFoundException('Sensor %s not found in db.' % self._sensor_ref)
        return [sensor]

    def is_sensor_owner(self, sensor_db):
        """
        No other sensor supported just the single sensor which was previously loaded.
        """
        return False
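
FileBasedPartitioner expects the partition file to be a YAML map from sensor node name to a list of sensor references; KVStorePartitioner reads the same kind of comma-separated list from the datastore key '<sensor_node_name>.sensor_partition'. A made-up example partition file:

# Hypothetical partition map consumed by FileBasedPartitioner
sensornode1:
  - examples.SampleSensor
  - linux.FileWatchSensor
sensornode2:
  - examples.SamplePollingSensor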

7 Next call
###############################################
Source file: st2/st2reactor/st2reactor/container/manager.py
import os
import sys
import signal

import eventlet

from st2common import log as logging
from st2reactor.container.process_container import ProcessSensorContainer
from st2common.services.sensor_watcher import SensorWatcher
from st2common.models.system.common import ResourceReference

LOG = logging.getLogger(__name__)


class SensorContainerManager(object):

    def __init__(self, sensors_partitioner):
        self._sensor_container = None
        self._sensors_watcher = SensorWatcher(create_handler=self._handle_create_sensor,
                                              update_handler=self._handle_update_sensor,
                                              delete_handler=self._handle_delete_sensor,
                                              queue_suffix='sensor_container')
        self._container_thread = None
        if not sensors_partitioner:
            raise ValueError('sensors_partitioner should be non-None.')
        self._sensors_partitioner = sensors_partitioner

    def run_sensors(self):
        """
        Run all sensors as determined by sensors_partitioner.
        """
        sensors = self._sensors_partitioner.get_sensors()
        if sensors:
            LOG.info('Setting up container to run %d sensors.', len(sensors))
            LOG.info('\tSensors list - %s.', [self._get_sensor_ref(sensor) for sensor in sensors])

        sensors_to_run = []
        for sensor in sensors:
            # TODO: Directly pass DB object to the ProcessContainer
            sensors_to_run.append(self._to_sensor_object(sensor))

        LOG.info('(PID:%s) SensorContainer started.', os.getpid())
        self._setup_sigterm_handler()
        self._spin_container_and_wait(sensors_to_run)

    def _spin_container_and_wait(self, sensors):
        try:
            self._sensor_container = ProcessSensorContainer(sensors=sensors)
            self._container_thread = eventlet.spawn(self._sensor_container.run)
            LOG.debug('Starting sensor CUD watcher...')
            self._sensors_watcher.start()
            exit_code = self._container_thread.wait()
            LOG.error('Process container quit with exit_code %d.', exit_code)
            LOG.error('(PID:%s) SensorContainer stopped.', os.getpid())
        except (KeyboardInterrupt, SystemExit):
            self._sensor_container.shutdown()
            self._sensors_watcher.stop()

            LOG.info('(PID:%s) SensorContainer stopped. Reason - %s', os.getpid(),
                     sys.exc_info()[0].__name__)

            eventlet.kill(self._container_thread)
            self._container_thread = None

            return 0

    def _setup_sigterm_handler(self):

        def sigterm_handler(signum=None, frame=None):
            # This will cause SystemExit to be thrown and we call sensor_container.shutdown()
            # there, which cleans things up.
            sys.exit(0)

        # Register a SIGTERM signal handler which calls sys.exit which causes SystemExit to
        # be thrown. We catch SystemExit and handle cleanup there.
        signal.signal(signal.SIGTERM, sigterm_handler)

    def _to_sensor_object(self, sensor_db):
        file_path = sensor_db.artifact_uri.replace('file://', '')
        class_name = sensor_db.entry_point.split('.')[-1]

        sensor_obj = {
            'pack': sensor_db.pack,
            'file_path': file_path,
            'class_name': class_name,
            'trigger_types': sensor_db.trigger_types,
            'poll_interval': sensor_db.poll_interval,
            'ref': self._get_sensor_ref(sensor_db)
        }

        return sensor_obj

    #################################################
    # Event handler methods for the sensor CUD events
    #################################################

    def _handle_create_sensor(self, sensor):
        if not self._sensors_partitioner.is_sensor_owner(sensor):
            LOG.info('sensor %s is not supported. Ignoring create.', self._get_sensor_ref(sensor))
            return
        if not sensor.enabled:
            LOG.info('sensor %s is not enabled.', self._get_sensor_ref(sensor))
            return
        LOG.info('Adding sensor %s.', self._get_sensor_ref(sensor))
        self._sensor_container.add_sensor(sensor=self._to_sensor_object(sensor))

    def _handle_update_sensor(self, sensor):
        if not self._sensors_partitioner.is_sensor_owner(sensor):
            LOG.info('sensor %s is not supported. Ignoring update.', self._get_sensor_ref(sensor))
            return
        sensor_ref = self._get_sensor_ref(sensor)
        sensor_obj = self._to_sensor_object(sensor)

        # Handle disabling sensor
        if not sensor.enabled:
            LOG.info('Sensor %s disabled. Unloading sensor.', sensor_ref)
            self._sensor_container.remove_sensor(sensor=sensor_obj)
            return

        LOG.info('Sensor %s updated. Reloading sensor.', sensor_ref)
        try:
            self._sensor_container.remove_sensor(sensor=sensor_obj)
        except:
            LOG.exception('Failed to reload sensor %s', sensor_ref)
        else:
            self._sensor_container.add_sensor(sensor=sensor_obj)
            LOG.info('Sensor %s reloaded.', sensor_ref)

    def _handle_delete_sensor(self, sensor):
        if not self._sensors_partitioner.is_sensor_owner(sensor):
            LOG.info('sensor %s is not supported. Ignoring delete.', self._get_sensor_ref(sensor))
            return
        LOG.info('Unloading sensor %s.', self._get_sensor_ref(sensor))
        self._sensor_container.remove_sensor(sensor=self._to_sensor_object(sensor))

    def _get_sensor_ref(self, sensor):
        return ResourceReference.to_string_reference(pack=sensor.pack, name=sensor.name)
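
_to_sensor_object flattens a SensorType DB model into a plain dict before handing it to the process container. A sketch of such a dict, with illustrative values only:

sensor_obj = {
    'pack': 'examples',
    'file_path': '/opt/stackstorm/packs/examples/sensors/sample_sensor.py',
    'class_name': 'SampleSensor',
    'trigger_types': ['examples.sample_trigger'],
    'poll_interval': 30,
    'ref': 'examples.SampleSensor',
}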

8 Next call
##############################
Source file: st2/st2reactor/st2reactor/container/process_container.py
import os
import sys
import time
import json
import subprocess

from collections import defaultdict

import eventlet
from eventlet.support import greenlets as greenlet
from oslo_config import cfg

from st2common import log as logging
from st2common.constants.error_messages import PACK_VIRTUALENV_DOESNT_EXIST
from st2common.constants.system import API_URL_ENV_VARIABLE_NAME
from st2common.constants.system import AUTH_TOKEN_ENV_VARIABLE_NAME
from st2common.constants.triggers import (SENSOR_SPAWN_TRIGGER, SENSOR_EXIT_TRIGGER)
from st2common.constants.exit_codes import SUCCESS_EXIT_CODE
from st2common.constants.exit_codes import FAILURE_EXIT_CODE
from st2common.models.system.common import ResourceReference
from st2common.services.access import create_token
from st2common.transport.reactor import TriggerDispatcher
from st2common.util.api import get_full_public_api_url
from st2common.util.pack import get_pack_common_libs_path_for_pack_ref
from st2common.util.shell import on_parent_exit
from st2common.util.sandboxing import get_sandbox_python_path
from st2common.util.sandboxing import get_sandbox_python_binary_path
from st2common.util.sandboxing import get_sandbox_virtualenv_path

__all__ = [
    'ProcessSensorContainer'
]

LOG = logging.getLogger('st2reactor.process_sensor_container')

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
WRAPPER_SCRIPT_NAME = 'sensor_wrapper.py'
WRAPPER_SCRIPT_PATH = os.path.join(BASE_DIR, WRAPPER_SCRIPT_NAME)

# How many times to try to subsequently respawn a sensor after a non-zero exit before giving up
SENSOR_MAX_RESPAWN_COUNTS = 2

# How many seconds after the sensor has been started we should wait before considering sensor as
# being started and running successfully
SENSOR_SUCCESSFUL_START_THRESHOLD = 10

# How long to wait (in seconds) before respawning a dead process
SENSOR_RESPAWN_DELAY = 2.5

# How long to wait for process to exit after sending SIGTERM signal. If the process doesn't
# exit in this amount of seconds, SIGKILL signal will be sent to the process.
PROCESS_EXIT_TIMEOUT = 5

# TODO: Allow multiple instances of the same sensor with different configuration
# options - we need to update sensors for that and add "get_id" or similar
# method to the sensor class


class ProcessSensorContainer(object):
    """
    Sensor container which runs sensors in a separate process.
    """

    def __init__(self, sensors, poll_interval=5, dispatcher=None):
        """
        :param sensors: A list of sensor dicts.
        :type sensors: ``list`` of ``dict``

        :param poll_interval: How long to sleep between each poll for running / dead sensors.
        :type poll_interval: ``float``
        """
        self._poll_interval = poll_interval

        self._sensors = {}  # maps sensor_id -> sensor object
        self._processes = {}  # maps sensor_id -> sensor process

        if not dispatcher:
            dispatcher = TriggerDispatcher(LOG)
        self._dispatcher = dispatcher

        self._stopped = False

        sensors = sensors or []
        for sensor_obj in sensors:
            sensor_id = self._get_sensor_id(sensor=sensor_obj)
            self._sensors[sensor_id] = sensor_obj

        # Stores information needed for respawning dead sensors
        self._sensor_start_times = {}  # maps sensor_id -> sensor start time
        self._sensor_respawn_counts = defaultdict(int)  # maps sensor_id -> number of respawns

        # A list of all the instance variables which hold internal state information about a
        # particular_sensor
        # Note: We don't clear respawn counts since we want to track this through the whole life
        # cycle of the container manager
        self._internal_sensor_state_variables = [
            self._processes,
            self._sensors,
            self._sensor_start_times,
        ]

        self._enable_common_pack_libs = cfg.CONF.packs.enable_common_libs or False

    def run(self):
        self._run_all_sensors()

        try:
            while not self._stopped:
                # Poll for all running processes
                sensor_ids = self._sensors.keys()

                if len(sensor_ids) >= 1:
                    LOG.debug('%d active sensor(s)' % (len(sensor_ids)))
                    self._poll_sensors_for_results(sensor_ids)
                else:
                    LOG.debug('No active sensors')

                eventlet.sleep(self._poll_interval)
        except greenlet.GreenletExit:
            # This exception is thrown when sensor container manager
            # kills the thread which runs process container. Not sure
            # if this is the best thing to do.
            self._stopped = True
            return SUCCESS_EXIT_CODE
        except:
            LOG.exception('Container failed to run sensors.')
            self._stopped = True
            return FAILURE_EXIT_CODE

        self._stopped = True
        LOG.error('Process container quit. It shouldn\'t.')
        return SUCCESS_EXIT_CODE

    def _poll_sensors_for_results(self, sensor_ids):
        """
        Main loop which polls sensor for results and detects dead sensors.
        """
        for sensor_id in sensor_ids:
            now = int(time.time())

            process = self._processes[sensor_id]
            status = process.poll()

            if status is not None:
                # Dead process detected
                LOG.info('Process for sensor %s has exited with code %s', sensor_id, status)

                sensor = self._sensors[sensor_id]
                self._delete_sensor(sensor_id)

                self._dispatch_trigger_for_sensor_exit(sensor=sensor,
                                                       exit_code=status)

                # Try to respawn a dead process (maybe it was a simple failure which can be
                # resolved with a restart)
                eventlet.spawn_n(self._respawn_sensor, sensor_id=sensor_id, sensor=sensor,
                                 exit_code=status)
            else:
                sensor_start_time = self._sensor_start_times[sensor_id]
                sensor_respawn_count = self._sensor_respawn_counts[sensor_id]
                successfuly_started = (now - sensor_start_time) >= SENSOR_SUCCESSFUL_START_THRESHOLD

                if successfuly_started and sensor_respawn_count >= 1:
                    # Sensor has been successfully running more than threshold seconds, clear the
                    # respawn counter so we can try to restart the sensor if it dies later on
                    self._sensor_respawn_counts[sensor_id] = 0

    def running(self):
        return len(self._processes)

    def stopped(self):
        return self._stopped

    def shutdown(self, force=False):
        LOG.info('Container shutting down. Invoking cleanup on sensors.')
        self._stopped = True

        if force:
            exit_timeout = 0
        else:
            exit_timeout = PROCESS_EXIT_TIMEOUT

        sensor_ids = self._sensors.keys()
        for sensor_id in sensor_ids:
            self._stop_sensor_process(sensor_id=sensor_id, exit_timeout=exit_timeout)

        LOG.info('All sensors are shut down.')

        self._sensors = {}
        self._processes = {}

    def add_sensor(self, sensor):
        """
        Add a new sensor to the container.

        :type sensor: ``dict``
        """
        sensor_id = self._get_sensor_id(sensor=sensor)

        if sensor_id in self._sensors:
            LOG.warning('Sensor %s already exists and running.', sensor_id)
            return False

        self._spawn_sensor_process(sensor=sensor)
        LOG.debug('Sensor %s started.', sensor_id)
        self._sensors[sensor_id] = sensor
        return True

    def remove_sensor(self, sensor):
        """
        Remove an existing sensor from the container.

        :type sensor: ``dict``
        """
        sensor_id = self._get_sensor_id(sensor=sensor)

        if sensor_id not in self._sensors:
            LOG.warning('Sensor %s isn\'t running in this container.', sensor_id)
            return False

        self._stop_sensor_process(sensor_id=sensor_id)
        LOG.debug('Sensor %s stopped.', sensor_id)
        return True

    def _run_all_sensors(self):
        sensor_ids = self._sensors.keys()

        for sensor_id in sensor_ids:
            sensor_obj = self._sensors[sensor_id]
            LOG.info('Running sensor %s', sensor_id)

            try:
                self._spawn_sensor_process(sensor=sensor_obj)
            except Exception as e:
                LOG.warning(e.message, exc_info=True)

                # Disable sensor which we are unable to start
                del self._sensors[sensor_id]
                continue

            LOG.info('Sensor %s started' % sensor_id)

    def _spawn_sensor_process(self, sensor):
        """
        Spawn a new process for the provided sensor.

        New process uses isolated Python binary from a virtual environment
        belonging to the sensor pack.
        """
        sensor_id = self._get_sensor_id(sensor=sensor)
        pack_ref = sensor['pack']

        virtualenv_path = get_sandbox_virtualenv_path(pack=pack_ref)
        python_path = get_sandbox_python_binary_path(pack=pack_ref)

        if virtualenv_path and not os.path.isdir(virtualenv_path):
            format_values = {'pack': sensor['pack'], 'virtualenv_path': virtualenv_path}
            msg = PACK_VIRTUALENV_DOESNT_EXIST % format_values
            raise Exception(msg)

        trigger_type_refs = sensor['trigger_types'] or []
        trigger_type_refs = ','.join(trigger_type_refs)

        parent_args = json.dumps(sys.argv[1:])

        args = [
            python_path,
            WRAPPER_SCRIPT_PATH,
            '--pack=%s' % (sensor['pack']),
            '--file-path=%s' % (sensor['file_path']),
            '--class-name=%s' % (sensor['class_name']),
            '--trigger-type-refs=%s' % (trigger_type_refs),
            '--parent-args=%s' % (parent_args)
        ]

        if sensor['poll_interval']:
            args.append('--poll-interval=%s' % (sensor['poll_interval']))

        sandbox_python_path = get_sandbox_python_path(inherit_from_parent=True,
                                                      inherit_parent_virtualenv=True)

        if self._enable_common_pack_libs:
            pack_common_libs_path = get_pack_common_libs_path_for_pack_ref(pack_ref=pack_ref)
        else:
            pack_common_libs_path = None

        env = os.environ.copy()

        if self._enable_common_pack_libs and pack_common_libs_path:
            env['PYTHONPATH'] = pack_common_libs_path + ':' + sandbox_python_path
        else:
            env['PYTHONPATH'] = sandbox_python_path

        # Include full api URL and API token specific to that sensor
        ttl = cfg.CONF.auth.service_token_ttl
        metadata = {
            'service': 'sensors_container',
            'sensor_path': sensor['file_path'],
            'sensor_class': sensor['class_name']
        }
        temporary_token = create_token(username='sensors_container', ttl=ttl, metadata=metadata,
                                       service=True)

        env[API_URL_ENV_VARIABLE_NAME] = get_full_public_api_url()
        env[AUTH_TOKEN_ENV_VARIABLE_NAME] = temporary_token.token

        # TODO 1: Purge temporary token when service stops or sensor process dies
        # TODO 2: Store metadata (wrapper process id) with the token and delete
        # tokens for old, dead processes on startup
        cmd = ' '.join(args)
        LOG.debug('Running sensor subprocess (cmd="%s")', cmd)

        # TODO: Intercept stdout and stderr for aggregated logging purposes
        try:
            process = subprocess.Popen(args=args, stdin=None, stdout=None,
                                       stderr=None, shell=False, env=env,
                                       preexec_fn=on_parent_exit('SIGTERM'))
        except Exception as e:
            cmd = ' '.join(args)
            message = ('Failed to spawn process for sensor %s ("%s"): %s' %
                       (sensor_id, cmd, str(e)))
            raise Exception(message)

        self._processes[sensor_id] = process
        self._sensors[sensor_id] = sensor
        self._sensor_start_times[sensor_id] = int(time.time())

        self._dispatch_trigger_for_sensor_spawn(sensor=sensor, process=process, cmd=cmd)

        return process

    def _stop_sensor_process(self, sensor_id, exit_timeout=PROCESS_EXIT_TIMEOUT):
        """
        Stop a sensor process for the provided sensor.

        :param sensor_id: Sensor ID.
        :type sensor_id: ``str``

        :param exit_timeout: How long to wait for process to exit after
                             sending SIGTERM signal. If the process doesn't
                             exit in this amount of seconds, SIGKILL signal
                             will be sent to the process.
        :type exit_timeout: ``int``
        """
        process = self._processes[sensor_id]

        # Delete sensor before terminating process so that it will not be
        # respawned during termination
        self._delete_sensor(sensor_id)

        # Terminate the process and wait for up to stop_timeout seconds for the
        # process to exit
        process.terminate()

        timeout = 0
        sleep_delay = 1
        while timeout < exit_timeout:
            status = process.poll()

            if status is not None:
                # Process has exited
                break

            timeout += sleep_delay
            time.sleep(sleep_delay)

        if status is None:
            # Process hasn't exited yet, forcefully kill it
            process.kill()

    def _respawn_sensor(self, sensor_id, sensor, exit_code):
        """
        Method for respawning a sensor which died with a non-zero exit code.
        """
        extra = {'sensor_id': sensor_id, 'sensor': sensor}

        if self._stopped:
            LOG.debug('Stopped, not respawning a dead sensor', extra=extra)
            return

        should_respawn = self._should_respawn_sensor(sensor_id=sensor_id, sensor=sensor,
                                                     exit_code=exit_code)

        if not should_respawn:
            LOG.debug('Not respawning a dead sensor', extra=extra)
            return

        LOG.debug('Respawning dead sensor', extra=extra)

        self._sensor_respawn_counts[sensor_id] += 1
        sleep_delay = (SENSOR_RESPAWN_DELAY * self._sensor_respawn_counts[sensor_id])
        eventlet.sleep(sleep_delay)

        try:
            self._spawn_sensor_process(sensor=sensor)
        except Exception as e:
            LOG.warning(e.message, exc_info=True)

            # Disable sensor which we are unable to start
            del self._sensors[sensor_id]

    def _should_respawn_sensor(self, sensor_id, sensor, exit_code):
        """
        Return True if the provided sensor should be respawned, False otherwise.
        """
        if exit_code == 0:
            # We only try to respawn sensors which exited with non-zero status code
            return False

        respawn_count = self._sensor_respawn_counts[sensor_id]
        if respawn_count >= SENSOR_MAX_RESPAWN_COUNTS:
            LOG.debug('Sensor has already been respawned max times, giving up')
            return False

        return True

    def _get_sensor_id(self, sensor):
        """
        Return a unique identifier for the provided sensor dict.

        :type sensor: ``dict``
        """
        sensor_id = sensor['ref']
        return sensor_id

    def _dispatch_trigger_for_sensor_spawn(self, sensor, process, cmd):
        trigger = ResourceReference.to_string_reference(
            name=SENSOR_SPAWN_TRIGGER['name'],
            pack=SENSOR_SPAWN_TRIGGER['pack'])
        now = int(time.time())
        payload = {
            'id': sensor['class_name'],
            'timestamp': now,
            'pid': process.pid,
            'cmd': cmd
        }
        self._dispatcher.dispatch(trigger, payload=payload)

    def _dispatch_trigger_for_sensor_exit(self, sensor, exit_code):
        trigger = ResourceReference.to_string_reference(
            name=SENSOR_EXIT_TRIGGER['name'],
            pack=SENSOR_EXIT_TRIGGER['pack'])
        now = int(time.time())
        payload = {
            'id': sensor['class_name'],
            'timestamp': now,
            'exit_code': exit_code
        }
        self._dispatcher.dispatch(trigger, payload=payload)

    def _delete_sensor(self, sensor_id):
        """
        Delete / reset all the internal state about a particular sensor.
        """
        for var in self._internal_sensor_state_variables:
            if sensor_id in var:
                del var[sensor_id]

Analysis:
The key method is _spawn_sensor_process, quoted in full above.
Each pack's sensors are run inside that pack's own isolated Python virtualenv:

            process = subprocess.Popen(args=args, stdin=None, stdout=None,
                                       stderr=None, shell=False, env=env,
                                       preexec_fn=on_parent_exit('SIGTERM'))
A child process is spawned to execute the wrapper command; the preexec_fn=on_parent_exit('SIGTERM') hook ensures the child is sent SIGTERM if the container process itself dies.
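
For the illustrative sensor dict sketched earlier, the container would build roughly the following argument list (every concrete path and value here is a made-up example):

args = [
    '/opt/stackstorm/virtualenvs/examples/bin/python',  # python from the pack virtualenv
    '/usr/lib/python2.7/site-packages/st2reactor/container/sensor_wrapper.py',
    '--pack=examples',
    '--file-path=/opt/stackstorm/packs/examples/sensors/sample_sensor.py',
    '--class-name=SampleSensor',
    '--trigger-type-refs=examples.sample_trigger',
    '--parent-args=["--config-file", "/etc/st2/st2.conf"]',  # JSON-encoded parent argv
    '--poll-interval=30',
]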

9 Next call
#############################
Source file: st2/st2reactor/st2reactor/container/sensor_wrapper.py
import os
import json
import atexit
import argparse
import traceback

from oslo_config import cfg
from jsonschema import ValidationError

from st2common import log as logging
from st2common.logging.misc import set_log_level_for_all_loggers
from st2common.models.api.trace import TraceContext
from st2common.models.api.trigger import TriggerAPI
from st2common.persistence.db_init import db_setup_with_retry
from st2common.transport.reactor import TriggerDispatcher
from st2common.util import loader
from st2common.util.config_loader import ContentPackConfigLoader
from st2common.services.triggerwatcher import TriggerWatcher
from st2reactor.sensor.base import Sensor
from st2reactor.sensor.base import PollingSensor
from st2reactor.sensor import config
from st2common.services.datastore import SensorDatastoreService
from st2common.util.monkey_patch import monkey_patch
from st2common.validators.api.reactor import validate_trigger_payload

__all__ = [
    'SensorWrapper',
    'SensorService'
]

monkey_patch()


class SensorService(object):
    """
    Instance of this class is passed to the sensor instance and exposes "public"
    methods which can be called by the sensor.
    """

    def __init__(self, sensor_wrapper):
        self._sensor_wrapper = sensor_wrapper
        self._logger = self._sensor_wrapper._logger
        self._dispatcher = TriggerDispatcher(self._logger)
        self._datastore_service = SensorDatastoreService(
            logger=self._logger,
            pack_name=self._sensor_wrapper._pack,
            class_name=self._sensor_wrapper._class_name,
            api_username='sensor_service')

        self._client = None

    @property
    def datastore_service(self):
        return self._datastore_service

    def get_logger(self, name):
        """
        Retrieve an instance of a logger to be used by the sensor class.
        """
        logger_name = '%s.%s' % (self._sensor_wrapper._logger.name, name)
        logger = logging.getLogger(logger_name)
        logger.propagate = True

        return logger

    ##################################
    # General methods
    ##################################

    def get_user_info(self):
        return self.datastore_service.get_user_info()

    ##################################
    # Sensor related methods
    ##################################

    def dispatch(self, trigger, payload=None, trace_tag=None):
        """
        Method which dispatches the trigger.

        :param trigger: Full name / reference of the trigger.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_tag: Trace tag to track the TriggerInstance.
        :type trace_tag: ``str``
        """
        # A missing or empty trace_tag results in no trace context
        trace_context = TraceContext(trace_tag=trace_tag) if trace_tag else None
        self._logger.debug('Added trace_context %s to trigger %s.', trace_context, trigger)
        self.dispatch_with_context(trigger, payload=payload, trace_context=trace_context)

    def dispatch_with_context(self, trigger, payload=None, trace_context=None):
        """
        Method which dispatches the trigger.

        :param trigger: Full name / reference of the trigger.
        :type trigger: ``str``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_context: Trace context to associate with Trigger.
        :type trace_context: ``st2common.api.models.api.trace.TraceContext``
        """
        # Tracks whether the specified payload complies with the trigger_type schema
        is_valid = True
        try:
            validate_trigger_payload(trigger_type_ref=trigger, payload=payload)
        except (ValidationError, Exception) as e:
            is_valid = False
            self._logger.warn('Failed to validate payload (%s) for trigger "%s": %s' %
                              (str(payload), trigger, str(e)))

        # If payload validation is disabled, the trigger is still dispatched even when
        # validation failed; dispatch is only blocked when validation is enabled.
        if not is_valid and cfg.CONF.system.validate_trigger_payload:
            self._logger.warn('Trigger payload validation failed and validation is enabled, not '
                              'dispatching a trigger "%s" (%s)' % (trigger, str(payload)))
            return None

        self._logger.debug('Dispatching trigger %s with payload %s.', trigger, payload)
        self._dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)

    ##################################
    # Methods for datastore management
    ##################################

    def list_values(self, local=True, prefix=None):
        return self._datastore_service.list_values(local, prefix)

    def get_value(self, name, local=True):
        return self._datastore_service.get_value(name, local)

    def set_value(self, name, value, ttl=None, local=True):
        return self._datastore_service.set_value(name, value, ttl, local)

    def delete_value(self, name, local=True):
        return self._datastore_service.delete_value(name, local)


class SensorWrapper(object):
    def __init__(self, pack, file_path, class_name, trigger_types,
                 poll_interval=None, parent_args=None):
        """
        :param pack: Name of the pack this sensor belongs to.
        :type pack: ``str``

        :param file_path: Path to the sensor module file.
        :type file_path: ``str``

        :param class_name: Sensor class name.
        :type class_name: ``str``

        :param trigger_types: A list of references to trigger types which
                                  belong to this sensor.
        :type trigger_types: ``list`` of ``str``

        :param poll_interval: Sensor poll interval (in seconds).
        :type poll_interval: ``int`` or ``None``

        :param parent_args: Command line arguments passed to the parent process.
        :type parent_args: ``list``
        """
        self._pack = pack
        self._file_path = file_path
        self._class_name = class_name
        self._trigger_types = trigger_types or []
        self._poll_interval = poll_interval
        self._parent_args = parent_args or []
        self._trigger_names = {}

        # 1. Parse the config with inherited parent args
        try:
            config.parse_args(args=self._parent_args)
        except Exception:
            pass

        # 2. Establish DB connection
        username = cfg.CONF.database.username if hasattr(cfg.CONF.database, 'username') else None
        password = cfg.CONF.database.password if hasattr(cfg.CONF.database, 'password') else None
        db_setup_with_retry(cfg.CONF.database.db_name, cfg.CONF.database.host,
                            cfg.CONF.database.port, username=username, password=password,
                            ssl=cfg.CONF.database.ssl, ssl_keyfile=cfg.CONF.database.ssl_keyfile,
                            ssl_certfile=cfg.CONF.database.ssl_certfile,
                            ssl_cert_reqs=cfg.CONF.database.ssl_cert_reqs,
                            ssl_ca_certs=cfg.CONF.database.ssl_ca_certs,
                            ssl_match_hostname=cfg.CONF.database.ssl_match_hostname)

        # 3. Instantiate the watcher
        self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                               update_handler=self._handle_update_trigger,
                                               delete_handler=self._handle_delete_trigger,
                                               trigger_types=self._trigger_types,
                                               queue_suffix='sensorwrapper_%s_%s' %
                                               (self._pack, self._class_name),
                                               exclusive=True)

        # 4. Set up logging
        self._logger = logging.getLogger('SensorWrapper.%s.%s' %
                                         (self._pack, self._class_name))
        logging.setup(cfg.CONF.sensorcontainer.logging)

        if '--debug' in parent_args:
            set_log_level_for_all_loggers()

        self._sensor_instance = self._get_sensor_instance()

    def run(self):
        atexit.register(self.stop)

        self._trigger_watcher.start()
        self._logger.info('Watcher started')

        self._logger.info('Running sensor initialization code')
        self._sensor_instance.setup()

        if self._poll_interval:
            message = ('Running sensor in active mode (poll interval=%ss)' %
                       (self._poll_interval))
        else:
            message = 'Running sensor in passive mode'

        self._logger.info(message)

        try:
            self._sensor_instance.run()
        except Exception as e:
            # Include traceback
            msg = ('Sensor "%s" run method raised an exception: %s.' %
                   (self._class_name, str(e)))
            self._logger.warn(msg, exc_info=True)
            raise Exception(msg)

    def stop(self):
        # Stop watcher
        self._logger.info('Stopping trigger watcher')
        self._trigger_watcher.stop()

        # Run sensor cleanup code
        self._logger.info('Invoking cleanup on sensor')
        self._sensor_instance.cleanup()

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        self._logger.debug('Calling sensor "add_trigger" method (trigger.type=%s)' %
                           (trigger.type))
        self._trigger_names[str(trigger.id)] = trigger

        trigger = self._sanitize_trigger(trigger=trigger)
        self._sensor_instance.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        self._logger.debug('Calling sensor "update_trigger" method (trigger.type=%s)' %
                           (trigger.type))
        self._trigger_names[str(trigger.id)] = trigger

        trigger = self._sanitize_trigger(trigger=trigger)
        self._sensor_instance.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        trigger_id = str(trigger.id)
        if trigger_id not in self._trigger_names:
            return

        self._logger.debug('Calling sensor "remove_trigger" method (trigger.type=%s)' %
                           (trigger.type))
        del self._trigger_names[trigger_id]

        trigger = self._sanitize_trigger(trigger=trigger)
        self._sensor_instance.remove_trigger(trigger=trigger)

    def _get_sensor_instance(self):
        """
        Retrieve instance of a sensor class.
        """
        _, filename = os.path.split(self._file_path)
        module_name, _ = os.path.splitext(filename)

        try:
            sensor_class = loader.register_plugin_class(base_class=Sensor,
                                                        file_path=self._file_path,
                                                        class_name=self._class_name)
        except Exception as e:
            tb_msg = traceback.format_exc()
            msg = ('Failed to load sensor class from file "%s" (sensor file most likely doesn\'t '
                   'exist or contains invalid syntax): %s' % (self._file_path, str(e)))
            msg += '\n\n' + tb_msg
            exc_cls = type(e)
            raise exc_cls(msg)

        if not sensor_class:
            raise ValueError('Sensor module is missing a class with name "%s"' %
                             (self._class_name))

        sensor_class_kwargs = {}
        sensor_class_kwargs['sensor_service'] = SensorService(sensor_wrapper=self)

        sensor_config = self._get_sensor_config()
        sensor_class_kwargs['config'] = sensor_config

        if self._poll_interval and issubclass(sensor_class, PollingSensor):
            sensor_class_kwargs['poll_interval'] = self._poll_interval

        try:
            sensor_instance = sensor_class(**sensor_class_kwargs)
        except Exception:
            self._logger.exception('Failed to instantiate "%s" sensor class' % (self._class_name))
            raise Exception('Failed to instantiate "%s" sensor class' % (self._class_name))

        return sensor_instance

    def _get_sensor_config(self):
        config_loader = ContentPackConfigLoader(pack_name=self._pack)
        config = config_loader.get_config()

        if config:
            self._logger.info('Found config for sensor "%s"' % (self._class_name))
        else:
            self._logger.info('No config found for sensor "%s"' % (self._class_name))

        return config

    def _sanitize_trigger(self, trigger):
        sanitized = TriggerAPI.from_model(trigger).to_dict()
        return sanitized


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Sensor runner wrapper')
    parser.add_argument('--pack', required=True,
                        help='Name of the pack this sensor belongs to')
    parser.add_argument('--file-path', required=True,
                        help='Path to the sensor module')
    parser.add_argument('--class-name', required=True,
                        help='Name of the sensor class')
    parser.add_argument('--trigger-type-refs', required=False,
                        help='Comma delimited string of trigger type references')
    parser.add_argument('--poll-interval', type=int, default=None, required=False,
                        help='Sensor poll interval')
    parser.add_argument('--parent-args', required=False,
                        help='Command line arguments passed to the parent process')
    args = parser.parse_args()

    trigger_types = args.trigger_type_refs
    trigger_types = trigger_types.split(',') if trigger_types else []
    parent_args = json.loads(args.parent_args) if args.parent_args else []
    assert isinstance(parent_args, list)

    obj = SensorWrapper(pack=args.pack,
                        file_path=args.file_path,
                        class_name=args.class_name,
                        trigger_types=trigger_types,
                        poll_interval=args.poll_interval,
                        parent_args=parent_args)
    obj.run()
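
For reference, a minimal polling sensor of the kind the wrapper loads and runs; the pack, trigger, and payload names are made up, and self._sensor_service is the SensorService instance injected by _get_sensor_instance() above:

from st2reactor.sensor.base import PollingSensor


class SampleSensor(PollingSensor):
    def setup(self):
        pass  # one-time initialization

    def poll(self):
        # Called every poll_interval seconds; dispatch through SensorService
        self._sensor_service.dispatch(trigger='examples.sample_trigger',
                                      payload={'value': 42})

    def cleanup(self):
        pass  # invoked from SensorWrapper.stop()

    def add_trigger(self, trigger):
        pass  # called by the trigger CUD handlers above

    def update_trigger(self, trigger):
        pass

    def remove_trigger(self, trigger):
        pass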

10 Next call
################################
Source file: st2/st2common/st2common/transport/reactor.py
from kombu import Exchange, Queue

from st2common import log as logging
from st2common.constants.trace import TRACE_CONTEXT
from st2common.models.api.trace import TraceContext
from st2common.transport import publishers
from st2common.transport import utils as transport_utils

__all__ = [
    'TriggerCUDPublisher',
    'TriggerInstancePublisher',

    'TriggerDispatcher',

    'get_sensor_cud_queue',
    'get_trigger_cud_queue',
    'get_trigger_instances_queue'
]

LOG = logging.getLogger(__name__)

# Exchange for Trigger CUD events
TRIGGER_CUD_XCHG = Exchange('st2.trigger', type='topic')

# Exchange for TriggerInstance events
TRIGGER_INSTANCE_XCHG = Exchange('st2.trigger_instances_dispatch', type='topic')

# Exchange for Sensor CUD events
SENSOR_CUD_XCHG = Exchange('st2.sensor', type='topic')


class SensorCUDPublisher(publishers.CUDPublisher):
    """
    Publisher responsible for publishing Sensor model CUD events.
    """

    def __init__(self, urls):
        super(SensorCUDPublisher, self).__init__(urls, SENSOR_CUD_XCHG)


class TriggerCUDPublisher(publishers.CUDPublisher):
    """
    Publisher responsible for publishing Trigger model CUD events.
    """

    def __init__(self, urls):
        super(TriggerCUDPublisher, self).__init__(urls, TRIGGER_CUD_XCHG)


class TriggerInstancePublisher(object):
    def __init__(self, urls):
        self._publisher = publishers.PoolPublisher(urls=urls)

    def publish_trigger(self, payload=None, routing_key=None):
        # TODO: We should use trigger reference as a routing key
        self._publisher.publish(payload, TRIGGER_INSTANCE_XCHG, routing_key)


class TriggerDispatcher(object):
    """
    This trigger dispatcher dispatches trigger instances to a message queue (RabbitMQ).
    """

    def __init__(self, logger=LOG):
        self._publisher = TriggerInstancePublisher(urls=transport_utils.get_messaging_urls())
        self._logger = logger

    def dispatch(self, trigger, payload=None, trace_context=None):
        """
        Method which dispatches the trigger.

        :param trigger: Full name / reference of the trigger.
        :type trigger: ``str`` or ``object``

        :param payload: Trigger payload.
        :type payload: ``dict``

        :param trace_context: Trace context to associate with Trigger.
        :type trace_context: ``TraceContext``
        """
        assert isinstance(payload, (type(None), dict))
        assert isinstance(trace_context, (type(None), TraceContext))

        payload = {
            'trigger': trigger,
            'payload': payload,
            TRACE_CONTEXT: trace_context
        }
        routing_key = 'trigger_instance'

        self._logger.debug('Dispatching trigger (trigger=%s,payload=%s)', trigger, payload)
        self._publisher.publish_trigger(payload=payload, routing_key=routing_key)


def get_trigger_cud_queue(name, routing_key, exclusive=False):
    return Queue(name, TRIGGER_CUD_XCHG, routing_key=routing_key, exclusive=exclusive)


def get_trigger_instances_queue(name, routing_key):
    return Queue(name, TRIGGER_INSTANCE_XCHG, routing_key=routing_key)


def get_sensor_cud_queue(name, routing_key):
    return Queue(name, SENSOR_CUD_XCHG, routing_key=routing_key)


def get_sensor_cud_queue_with_auto_delete(name, routing_key, auto_delete=True):
    return Queue(name, SENSOR_CUD_XCHG, routing_key=routing_key, auto_delete=auto_delete)
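
A usage sketch of TriggerDispatcher (the trigger reference is made up); this is essentially what SensorService.dispatch_with_context() ends up doing after payload validation:

from st2common.transport.reactor import TriggerDispatcher

dispatcher = TriggerDispatcher()
dispatcher.dispatch('examples.sample_trigger', payload={'value': 42})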

11 Next call
###################################
Source file: st2/st2common/st2common/transport/publishers.py
import copy

from kombu import Connection
from kombu.messaging import Producer

from st2common import log as logging
from st2common.transport.connection_retry_wrapper import ConnectionRetryWrapper

ANY_RK = '*'
CREATE_RK = 'create'
UPDATE_RK = 'update'
DELETE_RK = 'delete'

LOG = logging.getLogger(__name__)


class PoolPublisher(object):
    def __init__(self, urls):
        self.pool = Connection(urls, failover_strategy='round-robin').Pool(limit=10)
        self.cluster_size = len(urls)

    def errback(self, exc, interval):
        LOG.error('Rabbitmq connection error: %s', exc.message, exc_info=False)

    def publish(self, payload, exchange, routing_key=''):
        with self.pool.acquire(block=True) as connection:
            retry_wrapper = ConnectionRetryWrapper(cluster_size=self.cluster_size, logger=LOG)

            def do_publish(connection, channel):
                # ProducerPool ends up creating it own ConnectionPool which ends up completely
                # invalidating this ConnectionPool. Also, a ConnectionPool for producer does not
                # really solve any problems for us so better to create a Producer for each
                # publish.
                producer = Producer(channel)
                kwargs = {
                    'body': payload,
                    'exchange': exchange,
                    'routing_key': routing_key,
                    'serializer': 'pickle'
                }
                retry_wrapper.ensured(connection=connection,
                                      obj=producer,
                                      to_ensure_func=producer.publish,
                                      **kwargs)

            retry_wrapper.run(connection=connection, wrapped_callback=do_publish)


class SharedPoolPublishers(object):
    """
    This maintains some shared PoolPublishers. Within a single process the configured AMQP
    server is usually the same. This sharing allows from the same PoolPublisher to be reused
    for publishing purposes. Sharing publishers leads to shared connections.
    """
    shared_publishers = {}

    def get_publisher(self, urls):
        # The publisher_key format here only works because we are aware that urls will be a
        # list of strings. Sorting to end up with the same PoolPublisher regardless of
        # ordering in supplied list.
        urls_copy = copy.copy(urls)
        urls_copy.sort()
        publisher_key = ''.join(urls_copy)
        publisher = self.shared_publishers.get(publisher_key, None)
        if not publisher:
            # Use original urls here to preserve order.
            publisher = PoolPublisher(urls=urls)
            self.shared_publishers[publisher_key] = publisher
        return publisher


class CUDPublisher(object):
    def __init__(self, urls, exchange):
        self._publisher = SharedPoolPublishers().get_publisher(urls=urls)
        self._exchange = exchange

    def publish_create(self, payload):
        self._publisher.publish(payload, self._exchange, CREATE_RK)

    def publish_update(self, payload):
        self._publisher.publish(payload, self._exchange, UPDATE_RK)

    def publish_delete(self, payload):
        self._publisher.publish(payload, self._exchange, DELETE_RK)


class StatePublisherMixin(object):
    def __init__(self, urls, exchange):
        self._state_publisher = SharedPoolPublishers().get_publisher(urls=urls)
        self._state_exchange = exchange

    def publish_state(self, payload, state):
        if not state:
            raise Exception('Unable to publish unassigned state.')

        self._state_publisher.publish(payload, self._state_exchange, state)
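
A sketch of the publishing side that feeds the sensor watcher shown next; the payload dict is illustrative (in st2 the published payload is an API model, serialized with pickle as seen in PoolPublisher.publish):

from st2common.transport import utils as transport_utils
from st2common.transport.reactor import SensorCUDPublisher

publisher = SensorCUDPublisher(urls=transport_utils.get_messaging_urls())
# Routed with the 'create' routing key; SensorWatcher maps it to create_handler
publisher.publish_create(payload={'pack': 'examples', 'name': 'SampleSensor'})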

12 Next call
############################
Source file: st2/st2common/st2common/services/sensor_watcher.py
import eventlet
from kombu.mixins import ConsumerMixin
from kombu import Connection

from st2common import log as logging
from st2common.transport import reactor, publishers
from st2common.transport import utils as transport_utils
import st2common.util.queues as queue_utils

LOG = logging.getLogger(__name__)


class SensorWatcher(ConsumerMixin):

    def __init__(self, create_handler, update_handler, delete_handler,
                 queue_suffix=None):
        """
        :param create_handler: Function which is called on SensorDB create event.
        :type create_handler: ``callable``

        :param update_handler: Function which is called on SensorDB update event.
        :type update_handler: ``callable``

        :param delete_handler: Function which is called on SensorDB delete event.
        :type delete_handler: ``callable``
        """
        # TODO: Handle sensor type filtering using routing key
        self._create_handler = create_handler
        self._update_handler = update_handler
        self._delete_handler = delete_handler
        self._sensor_watcher_q = self._get_queue(queue_suffix)

        self.connection = None
        self._updates_thread = None

        self._handlers = {
            publishers.CREATE_RK: create_handler,
            publishers.UPDATE_RK: update_handler,
            publishers.DELETE_RK: delete_handler
        }

    def get_consumers(self, Consumer, channel):
        consumers = [Consumer(queues=[self._sensor_watcher_q],
                              accept=['pickle'],
                              callbacks=[self.process_task])]
        return consumers

    def process_task(self, body, message):
        LOG.debug('process_task')
        LOG.debug('     body: %s', body)
        LOG.debug('     message.properties: %s', message.properties)
        LOG.debug('     message.delivery_info: %s', message.delivery_info)

        routing_key = message.delivery_info.get('routing_key', '')
        handler = self._handlers.get(routing_key, None)

        try:
            if not handler:
                LOG.info('Skipping message %s as no handler was found.', message)
                return

            try:
                handler(body)
            except Exception as e:
                LOG.exception('Handling failed. Message body: %s. Exception: %s',
                              body, e.message)
        finally:
            message.ack()

    def start(self):
        try:
            self.connection = Connection(transport_utils.get_messaging_urls())
            self._updates_thread = eventlet.spawn(self.run)
        except:
            LOG.exception('Failed to start sensor_watcher.')
            self.connection.release()

    def stop(self):
        LOG.debug('Shutting down sensor watcher.')
        try:
            if self._updates_thread:
                self._updates_thread = eventlet.kill(self._updates_thread)

            if self.connection:
                channel = self.connection.channel()
                bound_sensor_watch_q = self._sensor_watcher_q(channel)
                try:
                    bound_sensor_watch_q.delete()
                except:
                    LOG.error('Unable to delete sensor watcher queue: %s', self._sensor_watcher_q)
        finally:
            if self.connection:
                self.connection.release()

    @staticmethod
    def _get_queue(queue_suffix):
        queue_name = queue_utils.get_queue_name(queue_name_base='st2.sensor.watch',
                                                queue_name_suffix=queue_suffix,
                                                add_random_uuid_to_suffix=True
                                                )
        # return reactor.get_sensor_cud_queue(queue_name, routing_key='#')
        return reactor.get_sensor_cud_queue_with_auto_delete(queue_name, routing_key='#', auto_delete=True)
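
A minimal sketch of wiring up SensorWatcher, mirroring what SensorContainerManager.__init__ does (the handlers here just print instead of managing sensor processes):

from st2common.services.sensor_watcher import SensorWatcher


def on_create(sensor_db):
    print('sensor created: %s' % sensor_db)


def on_update(sensor_db):
    print('sensor updated: %s' % sensor_db)


def on_delete(sensor_db):
    print('sensor deleted: %s' % sensor_db)


watcher = SensorWatcher(create_handler=on_create,
                        update_handler=on_update,
                        delete_handler=on_delete,
                        queue_suffix='demo')
watcher.start()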
Reference:
https://github.com/StackStorm/st2/tree/v2.6.0

Reposted from blog.csdn.net/qingyuanluofeng/article/details/89036269