Merge in latest from datadog/dd-trace-py (#1)
This commit is contained in:
parent 67f43ea6ab
commit f13c716af6

File diff suppressed because it is too large
							|  | @ -0,0 +1 @@ | |||
| * @DataDog/apm-python | ||||
|  | @ -0,0 +1,19 @@ | |||
| Thanks for taking the time to report an issue! | ||||
| 
 | ||||
| Before reporting an issue on dd-trace-py, please be sure to provide all | ||||
| necessary information. | ||||
| 
 | ||||
| If you're hitting a bug, make sure that you're using the latest version of this | ||||
| library. | ||||
| 
 | ||||
| ### Which version of dd-trace-py are you using? | ||||
| 
 | ||||
| ### Which version of the libraries are you using? | ||||
| 
 | ||||
| You can copy/paste the output of `pip freeze` here. | ||||
| 
 | ||||
| ### How can we reproduce your problem? | ||||
| 
 | ||||
| ### What is the result that you get? | ||||
| 
 | ||||
| ### What is the result that you expected? | ||||
|  | @ -1,5 +1,7 @@ | |||
| # Byte-compiled / optimized / DLL files | ||||
| __pycache__/ | ||||
| *.py[cod] | ||||
| *.sw[op] | ||||
| *$py.class | ||||
| 
 | ||||
| # C extensions | ||||
| *.so | ||||
|  | @ -18,7 +20,6 @@ develop-eggs | |||
| .installed.cfg | ||||
| lib | ||||
| lib64 | ||||
| __pycache__ | ||||
| venv*/ | ||||
| 
 | ||||
| # Installer logs | ||||
|  | @ -55,3 +56,94 @@ _build/ | |||
| # mypy | ||||
| .mypy_cache/ | ||||
| target | ||||
| # Distribution / packaging | ||||
| .Python | ||||
| env/ | ||||
| build/ | ||||
| develop-eggs/ | ||||
| dist/ | ||||
| downloads/ | ||||
| eggs/ | ||||
| .eggs/ | ||||
| lib64/ | ||||
| parts/ | ||||
| sdist/ | ||||
| var/ | ||||
| *.egg-info/ | ||||
| .installed.cfg | ||||
| *.egg | ||||
| *.whl | ||||
| 
 | ||||
| # PyInstaller | ||||
| #  Usually these files are written by a python script from a template | ||||
| #  before PyInstaller builds the exe, so as to inject date/other infos into it. | ||||
| *.manifest | ||||
| *.spec | ||||
| 
 | ||||
| # Installer logs | ||||
| pip-log.txt | ||||
| pip-delete-this-directory.txt | ||||
| 
 | ||||
| # Unit test / coverage reports | ||||
| htmlcov/ | ||||
| .tox/ | ||||
| .ddtox/ | ||||
| .coverage | ||||
| .coverage.* | ||||
| .cache | ||||
| coverage.xml | ||||
| *,cover | ||||
| .hypothesis/ | ||||
| .pytest_cache/ | ||||
| 
 | ||||
| # Translations | ||||
| *.mo | ||||
| *.pot | ||||
| 
 | ||||
| # Django stuff: | ||||
| *.log | ||||
| local_settings.py | ||||
| 
 | ||||
| # Flask stuff: | ||||
| instance/ | ||||
| .webassets-cache | ||||
| 
 | ||||
| # Scrapy stuff: | ||||
| .scrapy | ||||
| 
 | ||||
| # Sphinx documentation | ||||
| docs/_build/ | ||||
| 
 | ||||
| # PyBuilder | ||||
| target/ | ||||
| 
 | ||||
| # IPython Notebook | ||||
| .ipynb_checkpoints | ||||
| 
 | ||||
| # pyenv | ||||
| .python-version | ||||
| 
 | ||||
| # celery beat schedule file | ||||
| celerybeat-schedule | ||||
| 
 | ||||
| # docker-compose env file | ||||
| # it must be versioned to keep track of backing services defaults | ||||
| !.env | ||||
| 
 | ||||
| # virtualenv | ||||
| venv/ | ||||
| ENV/ | ||||
| 
 | ||||
| # Spyder project settings | ||||
| .spyderproject | ||||
| 
 | ||||
| # Rope project settings | ||||
| .ropeproject | ||||
| 
 | ||||
| # Vim | ||||
| *.swp | ||||
| # IDEA | ||||
| .idea/ | ||||
| 
 | ||||
| # VS Code | ||||
| .vscode/ | ||||
|  |  | |||
|  | @ -0,0 +1,200 @@ | |||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
| 
 | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
| 
 | ||||
|    1. Definitions. | ||||
| 
 | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
| 
 | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
| 
 | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
| 
 | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
| 
 | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
| 
 | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
| 
 | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
| 
 | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
| 
 | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
| 
 | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
| 
 | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
| 
 | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
| 
 | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
| 
 | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
| 
 | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
| 
 | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
| 
 | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
| 
 | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
| 
 | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
| 
 | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
| 
 | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
| 
 | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
| 
 | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
| 
 | ||||
|    END OF TERMS AND CONDITIONS | ||||
| 
 | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
| 
 | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "{}" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
| 
 | ||||
|    Copyright 2016 Datadog, Inc. | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
| 
 | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
| 
 | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
|  | @ -0,0 +1,24 @@ | |||
| Copyright (c) 2016, Datadog <info@datadoghq.com> | ||||
| All rights reserved. | ||||
| 
 | ||||
| Redistribution and use in source and binary forms, with or without | ||||
| modification, are permitted provided that the following conditions are met: | ||||
|     * Redistributions of source code must retain the above copyright | ||||
|       notice, this list of conditions and the following disclaimer. | ||||
|     * Redistributions in binary form must reproduce the above copyright | ||||
|       notice, this list of conditions and the following disclaimer in the | ||||
|       documentation and/or other materials provided with the distribution. | ||||
|     * Neither the name of Datadog nor the | ||||
|       names of its contributors may be used to endorse or promote products | ||||
|       derived from this software without specific prior written permission. | ||||
| 
 | ||||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND | ||||
| ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||||
| WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||||
| DISCLAIMED. IN NO EVENT SHALL DATADOG BE LIABLE FOR ANY | ||||
| DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||||
| (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||||
| LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||||
| ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||||
| SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
|  | @ -0,0 +1,4 @@ | |||
| Datadog dd-trace-py | ||||
| Copyright 2016-Present Datadog, Inc. | ||||
| 
 | ||||
| This product includes software developed at Datadog, Inc. (https://www.datadoghq.com/). | ||||
|  | @ -0,0 +1,77 @@ | |||
| desc "build the docs" | ||||
| task :docs do | ||||
|   sh "pip install sphinx" | ||||
|   Dir.chdir 'docs' do | ||||
|     sh "make html" | ||||
|   end | ||||
| end | ||||
| 
 | ||||
| # Deploy tasks | ||||
| S3_DIR = ENV['S3_DIR'] | ||||
| S3_BUCKET = "pypi.datadoghq.com" | ||||
| 
 | ||||
| desc "release a new wheel" | ||||
| task :'release:wheel' do | ||||
|   fail "Missing environment variable S3_DIR" if !S3_DIR or S3_DIR.empty? | ||||
| 
 | ||||
|   # Use custom `mkwheelhouse` to upload wheels and source distribution from dist/ to S3 bucket | ||||
|   sh "scripts/mkwheelhouse" | ||||
| end | ||||
| 
 | ||||
| desc "release the docs website" | ||||
| task :'release:docs' => :docs do | ||||
|   fail "Missing environment variable S3_DIR" if !S3_DIR or S3_DIR.empty? | ||||
|   sh "aws s3 cp --recursive docs/_build/html/ s3://#{S3_BUCKET}/#{S3_DIR}/docs/" | ||||
| end | ||||
| 
 | ||||
| namespace :pypi do | ||||
|   RELEASE_DIR = './dist/' | ||||
| 
 | ||||
|   def get_version() | ||||
|     return `python setup.py --version`.strip | ||||
|   end | ||||
| 
 | ||||
|   def get_branch() | ||||
|     return `git name-rev --name-only HEAD`.strip | ||||
|   end | ||||
| 
 | ||||
|   task :confirm do | ||||
|     ddtrace_version = get_version | ||||
| 
 | ||||
|     if get_branch.downcase != "tags/v#{ddtrace_version}" | ||||
|       print "WARNING: Expected current commit to be tagged as 'tags/v#{ddtrace_version}', instead we are on '#{get_branch}', proceed anyway [y|N]? " | ||||
|       $stdout.flush | ||||
| 
 | ||||
|       abort if $stdin.gets.to_s.strip.downcase != 'y' | ||||
|     end | ||||
| 
 | ||||
|     puts "WARNING: This task will build and release new wheels to https://pypi.org/project/ddtrace/, this action cannot be undone" | ||||
|     print "         To proceed please type the version '#{ddtrace_version}': " | ||||
|     $stdout.flush | ||||
| 
 | ||||
|     abort if $stdin.gets.to_s.strip.downcase != ddtrace_version | ||||
|   end | ||||
| 
 | ||||
|   task :clean do | ||||
|     FileUtils.rm_rf(RELEASE_DIR) | ||||
|   end | ||||
| 
 | ||||
|   task :install do | ||||
|     sh 'pip install twine' | ||||
|   end | ||||
| 
 | ||||
|   task :build => :clean do | ||||
|     puts "building release in #{RELEASE_DIR}" | ||||
|     sh "scripts/build-dist" | ||||
|   end | ||||
| 
 | ||||
|   task :release => [:confirm, :install, :build] do | ||||
|     builds = Dir.entries(RELEASE_DIR).reject {|f| f == '.' || f == '..'} | ||||
|     if builds.length == 0 | ||||
|       fail "no build found in #{RELEASE_DIR}" | ||||
|     end | ||||
| 
 | ||||
|     puts "uploading #{RELEASE_DIR}/*" | ||||
|     sh "twine upload #{RELEASE_DIR}/*" | ||||
|   end | ||||
| end | ||||
|  | @ -0,0 +1,55 @@ | |||
| """ | ||||
| This file configures a local pytest plugin, which allows us to configure plugin hooks to control | ||||
| the execution of our tests, for example by loading fixtures or configuring directories to ignore. | ||||
| 
 | ||||
| Local plugins: https://docs.pytest.org/en/3.10.1/writing_plugins.html#local-conftest-plugins | ||||
| Hook reference: https://docs.pytest.org/en/3.10.1/reference.html#hook-reference | ||||
| """ | ||||
| import os | ||||
| import re | ||||
| import sys | ||||
| 
 | ||||
| import pytest | ||||
| 
 | ||||
| PY_DIR_PATTERN = re.compile(r"^py[23][0-9]$") | ||||
| 
 | ||||
| 
 | ||||
| # Determine if the folder should be ignored | ||||
| # https://docs.pytest.org/en/3.10.1/reference.html#_pytest.hookspec.pytest_ignore_collect | ||||
| # DEV: We can only ignore folders/modules, we cannot ignore individual files | ||||
| # DEV: We must wrap with `@pytest.mark.hookwrapper` to inherit from default (e.g. honor `--ignore`) | ||||
| #      https://github.com/pytest-dev/pytest/issues/846#issuecomment-122129189 | ||||
| @pytest.mark.hookwrapper | ||||
| def pytest_ignore_collect(path, config): | ||||
|     """ | ||||
|     Skip directories defining a required minimum Python version | ||||
| 
 | ||||
|     Example:: | ||||
| 
 | ||||
|         File: tests/contrib/vertica/py35/test.py | ||||
|         Python 2.7: Skip | ||||
|         Python 3.4: Skip | ||||
|         Python 3.5: Collect | ||||
|         Python 3.6: Collect | ||||
|     """ | ||||
|     # Execute original behavior first | ||||
|     # DEV: We need to set `outcome.force_result(True)` if we need to override | ||||
|     #      these results and skip this directory | ||||
|     outcome = yield | ||||
| 
 | ||||
|     # Was not ignored by default behavior | ||||
|     if not outcome.get_result(): | ||||
|         # DEV: `path` is a `LocalPath` | ||||
|         path = str(path) | ||||
|         if not os.path.isdir(path): | ||||
|             path = os.path.dirname(path) | ||||
|         dirname = os.path.basename(path) | ||||
| 
 | ||||
|         # Directory name matches `py[23][0-9]` | ||||
|         if PY_DIR_PATTERN.match(dirname): | ||||
|             # Split out version numbers into a tuple: `py35` -> `(3, 5)` | ||||
|             min_required = tuple((int(v) for v in dirname.strip("py"))) | ||||
| 
 | ||||
|             # If the current Python version does not meet the minimum required, skip this directory | ||||
|             if sys.version_info[0:2] < min_required: | ||||
|                 outcome.force_result(True) | ||||
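|  | ||||
| # Illustrative sketch, not part of the plugin: how the `py[23][0-9]` directory | ||||
| # convention above resolves against the running interpreter, using only the | ||||
| # `PY_DIR_PATTERN` and `sys` already imported at the top of this file. | ||||
| def _example_version_gate(): | ||||
|     for dirname in ('py27', 'py35', 'utils'): | ||||
|         if PY_DIR_PATTERN.match(dirname): | ||||
|             # 'py35' -> (3, 5), compared against sys.version_info[0:2] | ||||
|             min_required = tuple(int(v) for v in dirname.strip('py')) | ||||
|             print('%s: %s' % (dirname, 'collect' if sys.version_info[0:2] >= min_required else 'skip')) | ||||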
|  | @ -0,0 +1,51 @@ | |||
| import sys | ||||
| 
 | ||||
| import pkg_resources | ||||
| 
 | ||||
| from .monkey import patch, patch_all | ||||
| from .pin import Pin | ||||
| from .span import Span | ||||
| from .tracer import Tracer | ||||
| from .settings import config | ||||
| 
 | ||||
| 
 | ||||
| try: | ||||
|     __version__ = pkg_resources.get_distribution(__name__).version | ||||
| except pkg_resources.DistributionNotFound: | ||||
|     # package is not installed | ||||
|     __version__ = None | ||||
| 
 | ||||
| 
 | ||||
| # a global tracer instance with integration settings | ||||
| tracer = Tracer() | ||||
| 
 | ||||
| __all__ = [ | ||||
|     'patch', | ||||
|     'patch_all', | ||||
|     'Pin', | ||||
|     'Span', | ||||
|     'tracer', | ||||
|     'Tracer', | ||||
|     'config', | ||||
| ] | ||||
| 
 | ||||
| 
 | ||||
| _ORIGINAL_EXCEPTHOOK = sys.excepthook | ||||
| 
 | ||||
| 
 | ||||
| def _excepthook(tp, value, traceback): | ||||
|     tracer.global_excepthook(tp, value, traceback) | ||||
|     if _ORIGINAL_EXCEPTHOOK: | ||||
|         return _ORIGINAL_EXCEPTHOOK(tp, value, traceback) | ||||
| 
 | ||||
| 
 | ||||
| def install_excepthook(): | ||||
|     """Install a hook that intercepts unhandled exceptions and sends metrics about them.""" | ||||
|     global _ORIGINAL_EXCEPTHOOK | ||||
|     _ORIGINAL_EXCEPTHOOK = sys.excepthook | ||||
|     sys.excepthook = _excepthook | ||||
| 
 | ||||
| 
 | ||||
| def uninstall_excepthook(): | ||||
|     """Uninstall the global tracer excepthook.""" | ||||
|     sys.excepthook = _ORIGINAL_EXCEPTHOOK | ||||
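|  | ||||
| # A minimal usage sketch (illustrative only; assumes a locally running agent): | ||||
| # | ||||
| #     import ddtrace | ||||
| #     ddtrace.install_excepthook()  # report unhandled exceptions via the global tracer | ||||
| #     with ddtrace.tracer.trace('example.operation') as span: | ||||
| #         span.set_tag('example', 'value') | ||||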
|  | @ -0,0 +1,82 @@ | |||
| import atexit | ||||
| import threading | ||||
| import os | ||||
| 
 | ||||
| from .internal.logger import get_logger | ||||
| 
 | ||||
| _LOG = get_logger(__name__) | ||||
| 
 | ||||
| 
 | ||||
| class PeriodicWorkerThread(object): | ||||
|     """Periodic worker thread. | ||||
| 
 | ||||
|     This class can be used to instantiate a worker thread that will run its `run_periodic` function every `interval` | ||||
|     seconds. | ||||
| 
 | ||||
|     The method `on_shutdown` will be called on worker shutdown. The worker will be shut down when the program exits, | ||||
|     and can be waited for with the `exit_timeout` parameter. | ||||
| 
 | ||||
|     """ | ||||
| 
 | ||||
|     _DEFAULT_INTERVAL = 1.0 | ||||
| 
 | ||||
|     def __init__(self, interval=_DEFAULT_INTERVAL, exit_timeout=None, name=None, daemon=True): | ||||
|         """Create a new worker thread that runs a function periodically. | ||||
| 
 | ||||
|         :param interval: The interval in seconds to wait between calls to `run_periodic`. | ||||
|         :param exit_timeout: The timeout to use when exiting the program and waiting for the thread to finish. | ||||
|         :param name: Name of the worker. | ||||
|         :param daemon: Whether the worker should be a daemon. | ||||
|         """ | ||||
| 
 | ||||
|         self._thread = threading.Thread(target=self._target, name=name) | ||||
|         self._thread.daemon = daemon | ||||
|         self._stop = threading.Event() | ||||
|         self.interval = interval | ||||
|         self.exit_timeout = exit_timeout | ||||
|         atexit.register(self._atexit) | ||||
| 
 | ||||
|     def _atexit(self): | ||||
|         self.stop() | ||||
|         if self.exit_timeout is not None: | ||||
|             key = 'ctrl-break' if os.name == 'nt' else 'ctrl-c' | ||||
|             _LOG.debug( | ||||
|                 'Waiting %d seconds for %s to finish. Hit %s to quit.', | ||||
|                 self.exit_timeout, self._thread.name, key, | ||||
|             ) | ||||
|             self.join(self.exit_timeout) | ||||
| 
 | ||||
|     def start(self): | ||||
|         """Start the periodic worker.""" | ||||
|         _LOG.debug('Starting %s thread', self._thread.name) | ||||
|         self._thread.start() | ||||
| 
 | ||||
|     def stop(self): | ||||
|         """Stop the worker.""" | ||||
|         _LOG.debug('Stopping %s thread', self._thread.name) | ||||
|         self._stop.set() | ||||
| 
 | ||||
|     def is_alive(self): | ||||
|         return self._thread.is_alive() | ||||
| 
 | ||||
|     def join(self, timeout=None): | ||||
|         return self._thread.join(timeout) | ||||
| 
 | ||||
|     def _target(self): | ||||
|         while not self._stop.wait(self.interval): | ||||
|             self.run_periodic() | ||||
|         self._on_shutdown() | ||||
| 
 | ||||
|     @staticmethod | ||||
|     def run_periodic(): | ||||
|         """Method executed every interval.""" | ||||
|         pass | ||||
| 
 | ||||
|     def _on_shutdown(self): | ||||
|         _LOG.debug('Shutting down %s thread', self._thread.name) | ||||
|         self.on_shutdown() | ||||
| 
 | ||||
|     @staticmethod | ||||
|     def on_shutdown(): | ||||
|         """Method run on worker shutdown.""" | ||||
|         pass | ||||
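|  | ||||
| # Illustrative subclass (a sketch, not shipped code): override the hooks above. | ||||
| class _ExampleWorker(PeriodicWorkerThread): | ||||
|     def run_periodic(self): | ||||
|         _LOG.debug('periodic tick') | ||||
|  | ||||
|     def on_shutdown(self): | ||||
|         _LOG.debug('example worker shut down') | ||||
|  | ||||
| # worker = _ExampleWorker(interval=0.5, name='example') | ||||
| # worker.start()  # calls run_periodic every 0.5s until stop() is called | ||||
| # worker.stop(); worker.join(timeout=1) | ||||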
|  | @ -0,0 +1,279 @@ | |||
| # stdlib | ||||
| from json import loads | ||||
| import socket | ||||
| 
 | ||||
| # project | ||||
| import ddtrace | ||||
| from .encoding import get_encoder, JSONEncoder | ||||
| from .compat import httplib, PYTHON_VERSION, PYTHON_INTERPRETER, get_connection_response | ||||
| from .internal.logger import get_logger | ||||
| from .internal.runtime import container | ||||
| from .payload import Payload, PayloadFull | ||||
| from .utils.deprecation import deprecated | ||||
| from .utils import time | ||||
| 
 | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| 
 | ||||
| _VERSIONS = {'v0.4': {'traces': '/v0.4/traces', | ||||
|                       'services': '/v0.4/services', | ||||
|                       'compatibility_mode': False, | ||||
|                       'fallback': 'v0.3'}, | ||||
|              'v0.3': {'traces': '/v0.3/traces', | ||||
|                       'services': '/v0.3/services', | ||||
|                       'compatibility_mode': False, | ||||
|                       'fallback': 'v0.2'}, | ||||
|              'v0.2': {'traces': '/v0.2/traces', | ||||
|                       'services': '/v0.2/services', | ||||
|                       'compatibility_mode': True, | ||||
|                       'fallback': None}} | ||||
| 
 | ||||
| 
 | ||||
| class Response(object): | ||||
|     """ | ||||
|     Custom API Response object to represent a response from calling the API. | ||||
| 
 | ||||
|     We do this to ensure we know expected properties will exist, and so we | ||||
|     can call `resp.read()` and load the body once into an instance before we | ||||
|     close the HTTPConnection used for the request. | ||||
|     """ | ||||
|     __slots__ = ['status', 'body', 'reason', 'msg'] | ||||
| 
 | ||||
|     def __init__(self, status=None, body=None, reason=None, msg=None): | ||||
|         self.status = status | ||||
|         self.body = body | ||||
|         self.reason = reason | ||||
|         self.msg = msg | ||||
| 
 | ||||
|     @classmethod | ||||
|     def from_http_response(cls, resp): | ||||
|         """ | ||||
|         Build a ``Response`` from the provided ``HTTPResponse`` object. | ||||
| 
 | ||||
|         This function will call `.read()` to consume the body of the ``HTTPResponse`` object. | ||||
| 
 | ||||
|         :param resp: ``HTTPResponse`` object to build the ``Response`` from | ||||
|         :type resp: ``HTTPResponse`` | ||||
|         :rtype: ``Response`` | ||||
|         :returns: A new ``Response`` | ||||
|         """ | ||||
|         return cls( | ||||
|             status=resp.status, | ||||
|             body=resp.read(), | ||||
|             reason=getattr(resp, 'reason', None), | ||||
|             msg=getattr(resp, 'msg', None), | ||||
|         ) | ||||
| 
 | ||||
|     def get_json(self): | ||||
|         """Helper to parse the body of this response as JSON""" | ||||
|         try: | ||||
|             body = self.body | ||||
|             if not body: | ||||
|                 log.debug('Empty reply from Datadog Agent, %r', self) | ||||
|                 return | ||||
| 
 | ||||
|             if not isinstance(body, str) and hasattr(body, 'decode'): | ||||
|                 body = body.decode('utf-8') | ||||
| 
 | ||||
|             if hasattr(body, 'startswith') and body.startswith('OK'): | ||||
|                 # This typically happens when using a priority-sampling enabled | ||||
|                 # library with an outdated agent. It still works, but priority sampling | ||||
|                 # will probably send too many traces, so the next step is to upgrade the agent. | ||||
|                 log.debug('Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date') | ||||
|                 return | ||||
| 
 | ||||
|             return loads(body) | ||||
|         except (ValueError, TypeError): | ||||
|             log.debug('Unable to parse Datadog Agent JSON response: %r', body, exc_info=True) | ||||
| 
 | ||||
|     def __repr__(self): | ||||
|         return '{0}(status={1!r}, body={2!r}, reason={3!r}, msg={4!r})'.format( | ||||
|             self.__class__.__name__, | ||||
|             self.status, | ||||
|             self.body, | ||||
|             self.reason, | ||||
|             self.msg, | ||||
|         ) | ||||
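|  | ||||
| # Behavior sketch for Response.get_json (illustrative values): | ||||
| # | ||||
| #     Response(status=200, body=b'{"rate_by_service": {}}').get_json()  # -> dict | ||||
| #     Response(status=200, body=b'OK').get_json()  # -> None (outdated agent reply) | ||||
| #     Response(status=200, body=b'').get_json()    # -> None (empty reply) | ||||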
| 
 | ||||
| 
 | ||||
| class UDSHTTPConnection(httplib.HTTPConnection): | ||||
|     """An HTTP connection established over a Unix Domain Socket.""" | ||||
| 
 | ||||
|     # It's "important" to keep the hostname and port arguments here; while they are not used by the connection | ||||
|     # mechanism, they are actually used as HTTP headers such as `Host`. | ||||
|     def __init__(self, path, https, *args, **kwargs): | ||||
|         if https: | ||||
|             httplib.HTTPSConnection.__init__(self, *args, **kwargs) | ||||
|         else: | ||||
|             httplib.HTTPConnection.__init__(self, *args, **kwargs) | ||||
|         self.path = path | ||||
| 
 | ||||
|     def connect(self): | ||||
|         sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) | ||||
|         sock.connect(self.path) | ||||
|         self.sock = sock | ||||
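|  | ||||
| # Illustrative construction (the API class below does this; the socket path is | ||||
| # an example value, not a documented default): | ||||
| # | ||||
| #     conn = UDSHTTPConnection('/var/run/datadog/apm.socket', False, 'localhost', 8126, timeout=2) | ||||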
| 
 | ||||
| 
 | ||||
| class API(object): | ||||
|     """ | ||||
|     Send data to the trace agent using the HTTP protocol and JSON format | ||||
|     """ | ||||
| 
 | ||||
|     TRACE_COUNT_HEADER = 'X-Datadog-Trace-Count' | ||||
| 
 | ||||
|     # Default timeout when establishing HTTP connection and sending/receiving from socket. | ||||
|     # This ought to be enough as the agent is local | ||||
|     TIMEOUT = 2 | ||||
| 
 | ||||
|     def __init__(self, hostname, port, uds_path=None, https=False, headers=None, encoder=None, priority_sampling=False): | ||||
|         """Create a new connection to the Tracer API. | ||||
| 
 | ||||
|         :param hostname: The hostname. | ||||
|         :param port: The TCP port to use. | ||||
|         :param uds_path: The path to use if the connection is to be established with a Unix Domain Socket. | ||||
|         :param headers: The headers to pass along the request. | ||||
|         :param encoder: The encoder to use to serialize data. | ||||
|         :param priority_sampling: Whether to use priority sampling. | ||||
|         """ | ||||
|         self.hostname = hostname | ||||
|         self.port = int(port) | ||||
|         self.uds_path = uds_path | ||||
|         self.https = https | ||||
| 
 | ||||
|         self._headers = headers or {} | ||||
|         self._version = None | ||||
| 
 | ||||
|         if priority_sampling: | ||||
|             self._set_version('v0.4', encoder=encoder) | ||||
|         else: | ||||
|             self._set_version('v0.3', encoder=encoder) | ||||
| 
 | ||||
|         self._headers.update({ | ||||
|             'Datadog-Meta-Lang': 'python', | ||||
|             'Datadog-Meta-Lang-Version': PYTHON_VERSION, | ||||
|             'Datadog-Meta-Lang-Interpreter': PYTHON_INTERPRETER, | ||||
|             'Datadog-Meta-Tracer-Version': ddtrace.__version__, | ||||
|         }) | ||||
| 
 | ||||
|         # Add container information if we have it | ||||
|         self._container_info = container.get_container_info() | ||||
|         if self._container_info and self._container_info.container_id: | ||||
|             self._headers.update({ | ||||
|                 'Datadog-Container-Id': self._container_info.container_id, | ||||
|             }) | ||||
| 
 | ||||
|     def __str__(self): | ||||
|         if self.uds_path: | ||||
|             return 'unix://' + self.uds_path | ||||
|         if self.https: | ||||
|             scheme = 'https://' | ||||
|         else: | ||||
|             scheme = 'http://' | ||||
|         return '%s%s:%s' % (scheme, self.hostname, self.port) | ||||
| 
 | ||||
|     def _set_version(self, version, encoder=None): | ||||
|         if version not in _VERSIONS: | ||||
|             version = 'v0.2' | ||||
|         if version == self._version: | ||||
|             return | ||||
|         self._version = version | ||||
|         self._traces = _VERSIONS[version]['traces'] | ||||
|         self._services = _VERSIONS[version]['services'] | ||||
|         self._fallback = _VERSIONS[version]['fallback'] | ||||
|         self._compatibility_mode = _VERSIONS[version]['compatibility_mode'] | ||||
|         if self._compatibility_mode: | ||||
|             self._encoder = JSONEncoder() | ||||
|         else: | ||||
|             self._encoder = encoder or get_encoder() | ||||
|         # overwrite the Content-type with the one chosen in the Encoder | ||||
|         self._headers.update({'Content-Type': self._encoder.content_type}) | ||||
| 
 | ||||
|     def _downgrade(self): | ||||
|         """ | ||||
|         Downgrades the used encoder and API level. This method must fall back to a safe | ||||
|         encoder and API, so that it will succeed regardless of the user's configuration. This action | ||||
|         ensures that the compatibility mode is activated so that the downgrade will be | ||||
|         executed only once. | ||||
|         """ | ||||
|         self._set_version(self._fallback) | ||||
| 
 | ||||
|     def send_traces(self, traces): | ||||
|         """Send traces to the API. | ||||
| 
 | ||||
|         :param traces: A list of traces. | ||||
|         :return: The list of API HTTP responses. | ||||
|         """ | ||||
|         if not traces: | ||||
|             return [] | ||||
| 
 | ||||
|         with time.StopWatch() as sw: | ||||
|             responses = [] | ||||
|             payload = Payload(encoder=self._encoder) | ||||
|             for trace in traces: | ||||
|                 try: | ||||
|                     payload.add_trace(trace) | ||||
|                 except PayloadFull: | ||||
|                     # Is payload full or is the trace too big? | ||||
|                     # If payload is not empty, then using a new Payload might allow us to fit the trace. | ||||
|                     # Let's flush the Payload and try to put the trace in a new empty Payload. | ||||
|                     if not payload.empty: | ||||
|                         responses.append(self._flush(payload)) | ||||
|                         # Create a new payload | ||||
|                         payload = Payload(encoder=self._encoder) | ||||
|                         try: | ||||
|                             # Add the trace that we were unable to add in that iteration | ||||
|                             payload.add_trace(trace) | ||||
|                         except PayloadFull: | ||||
|                             # If the trace does not fit in a payload on its own, that's bad. Drop it. | ||||
|                             log.warning('Trace %r is too big to fit in a payload, dropping it', trace) | ||||
| 
 | ||||
|             # Check that the Payload is not empty: | ||||
|             # it could be empty if the last trace was too big to fit. | ||||
|             if not payload.empty: | ||||
|                 responses.append(self._flush(payload)) | ||||
| 
 | ||||
|         log.debug('reported %d traces in %.5fs', len(traces), sw.elapsed()) | ||||
| 
 | ||||
|         return responses | ||||
| 
 | ||||
|     def _flush(self, payload): | ||||
|         try: | ||||
|             response = self._put(self._traces, payload.get_payload(), payload.length) | ||||
|         except (httplib.HTTPException, OSError, IOError) as e: | ||||
|             return e | ||||
| 
 | ||||
|         # the API endpoint is not available so we should downgrade the connection and re-try the call | ||||
|         if response.status in [404, 415] and self._fallback: | ||||
|             log.debug("calling endpoint '%s' but received %s; downgrading API", self._traces, response.status) | ||||
|             self._downgrade() | ||||
|             return self._flush(payload) | ||||
| 
 | ||||
|         return response | ||||
| 
 | ||||
|     @deprecated(message='Sending services to the API is no longer necessary', version='1.0.0') | ||||
|     def send_services(self, *args, **kwargs): | ||||
|         return | ||||
| 
 | ||||
|     def _put(self, endpoint, data, count): | ||||
|         headers = self._headers.copy() | ||||
|         headers[self.TRACE_COUNT_HEADER] = str(count) | ||||
| 
 | ||||
|         if self.uds_path is None: | ||||
|             if self.https: | ||||
|                 conn = httplib.HTTPSConnection(self.hostname, self.port, timeout=self.TIMEOUT) | ||||
|             else: | ||||
|                 conn = httplib.HTTPConnection(self.hostname, self.port, timeout=self.TIMEOUT) | ||||
|         else: | ||||
|             conn = UDSHTTPConnection(self.uds_path, self.https, self.hostname, self.port, timeout=self.TIMEOUT) | ||||
| 
 | ||||
|         try: | ||||
|             conn.request('PUT', endpoint, data, headers) | ||||
| 
 | ||||
|             # Parse the HTTPResponse into an API.Response | ||||
|             # DEV: This will call `resp.read()` which must happen before the `conn.close()` below, | ||||
|             #      if we call `.close()` then all future `.read()` calls will return `b''` | ||||
|             resp = get_connection_response(conn) | ||||
|             return Response.from_http_response(resp) | ||||
|         finally: | ||||
|             conn.close() | ||||
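|  | ||||
| # Minimal usage sketch (illustrative; assumes a local trace agent and that | ||||
| # `traces` is a list of span lists produced by the tracer): | ||||
| # | ||||
| #     api = API(hostname='localhost', port=8126, priority_sampling=True) | ||||
| #     responses = api.send_traces(traces)  # one Response (or exception) per flush | ||||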
|  | @ -0,0 +1,150 @@ | |||
| """ | ||||
| Bootstrapping code that is run when using the `ddtrace-run` Python entrypoint. | ||||
| Add all monkey-patching that needs to run by default here. | ||||
| """ | ||||
| 
 | ||||
| import os | ||||
| import imp | ||||
| import sys | ||||
| import logging | ||||
| 
 | ||||
| from ddtrace.utils.formats import asbool, get_env | ||||
| from ddtrace.internal.logger import get_logger | ||||
| from ddtrace import constants | ||||
| 
 | ||||
| logs_injection = asbool(get_env("logs", "injection")) | ||||
| DD_LOG_FORMAT = "%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] {}- %(message)s".format( | ||||
|     "[dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s] " if logs_injection else "" | ||||
| ) | ||||
| 
 | ||||
| if logs_injection: | ||||
|     # immediately patch logging if trace id injected | ||||
|     from ddtrace import patch | ||||
| 
 | ||||
|     patch(logging=True) | ||||
| 
 | ||||
| debug = os.environ.get("DATADOG_TRACE_DEBUG") | ||||
| 
 | ||||
| # Set here a default logging format for basicConfig | ||||
| 
 | ||||
| # DEV: Once basicConfig is called here, future calls to it cannot be used to | ||||
| # change the formatter since it applies the formatter to the root handler only | ||||
| # upon initializing it the first time. | ||||
| # See https://github.com/python/cpython/blob/112e4afd582515fcdcc0cde5012a4866e5cfda12/Lib/logging/__init__.py#L1550 | ||||
| if debug and debug.lower() == "true": | ||||
|     logging.basicConfig(level=logging.DEBUG, format=DD_LOG_FORMAT) | ||||
| else: | ||||
|     logging.basicConfig(format=DD_LOG_FORMAT) | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| EXTRA_PATCHED_MODULES = { | ||||
|     "bottle": True, | ||||
|     "django": True, | ||||
|     "falcon": True, | ||||
|     "flask": True, | ||||
|     "pylons": True, | ||||
|     "pyramid": True, | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| def update_patched_modules(): | ||||
|     modules_to_patch = os.environ.get("DATADOG_PATCH_MODULES") | ||||
|     if not modules_to_patch: | ||||
|         return | ||||
|     for patch in modules_to_patch.split(","): | ||||
|         if len(patch.split(":")) != 2: | ||||
|             log.debug("skipping malformed patch instruction") | ||||
|             continue | ||||
| 
 | ||||
|         module, should_patch = patch.split(":") | ||||
|         if should_patch.lower() not in ["true", "false"]: | ||||
|             log.debug("skipping malformed patch instruction for %s", module) | ||||
|             continue | ||||
| 
 | ||||
|         EXTRA_PATCHED_MODULES.update({module: should_patch.lower() == "true"}) | ||||
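|  | ||||
| # Example (illustrative values): | ||||
| # | ||||
| #     os.environ['DATADOG_PATCH_MODULES'] = 'bottle:true,redis:false' | ||||
| #     update_patched_modules() | ||||
| #     assert EXTRA_PATCHED_MODULES['redis'] is False  # redis integration disabled | ||||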
| 
 | ||||
| 
 | ||||
| def add_global_tags(tracer): | ||||
|     tags = {} | ||||
|     for tag in os.environ.get("DD_TRACE_GLOBAL_TAGS", "").split(","): | ||||
|         tag_name, _, tag_value = tag.partition(":") | ||||
|         if not tag_name or not tag_value: | ||||
|             log.debug("skipping malformed tracer tag") | ||||
|             continue | ||||
| 
 | ||||
|         tags[tag_name] = tag_value | ||||
|     tracer.set_tags(tags) | ||||
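|  | ||||
| # Example (illustrative values): | ||||
| # | ||||
| #     os.environ['DD_TRACE_GLOBAL_TAGS'] = 'env:prod,team:apm' | ||||
| #     add_global_tags(tracer)  # calls tracer.set_tags({'env': 'prod', 'team': 'apm'}) | ||||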
| 
 | ||||
| 
 | ||||
| try: | ||||
|     from ddtrace import tracer | ||||
| 
 | ||||
|     patch = True | ||||
| 
 | ||||
|     # Respect DATADOG_* environment variables in global tracer configuration | ||||
|     # TODO: these variables are deprecated; use utils method and update our documentation | ||||
|     # correct prefix should be DD_* | ||||
|     enabled = os.environ.get("DATADOG_TRACE_ENABLED") | ||||
|     hostname = os.environ.get("DD_AGENT_HOST", os.environ.get("DATADOG_TRACE_AGENT_HOSTNAME")) | ||||
|     port = os.environ.get("DATADOG_TRACE_AGENT_PORT") | ||||
|     priority_sampling = os.environ.get("DATADOG_PRIORITY_SAMPLING") | ||||
| 
 | ||||
|     opts = {} | ||||
| 
 | ||||
|     if enabled and enabled.lower() == "false": | ||||
|         opts["enabled"] = False | ||||
|         patch = False | ||||
|     if hostname: | ||||
|         opts["hostname"] = hostname | ||||
|     if port: | ||||
|         opts["port"] = int(port) | ||||
|     if priority_sampling: | ||||
|         opts["priority_sampling"] = asbool(priority_sampling) | ||||
| 
 | ||||
|     opts["collect_metrics"] = asbool(get_env("runtime_metrics", "enabled")) | ||||
| 
 | ||||
|     if opts: | ||||
|         tracer.configure(**opts) | ||||
| 
 | ||||
|     if logs_injection: | ||||
|         EXTRA_PATCHED_MODULES.update({"logging": True}) | ||||
| 
 | ||||
|     if patch: | ||||
|         update_patched_modules() | ||||
|         from ddtrace import patch_all | ||||
| 
 | ||||
|         patch_all(**EXTRA_PATCHED_MODULES) | ||||
| 
 | ||||
|     if "DATADOG_ENV" in os.environ: | ||||
|         tracer.set_tags({constants.ENV_KEY: os.environ["DATADOG_ENV"]}) | ||||
| 
 | ||||
|     if "DD_TRACE_GLOBAL_TAGS" in os.environ: | ||||
|         add_global_tags(tracer) | ||||
| 
 | ||||
|     # Ensure sitecustomize.py is properly called if available in application directories: | ||||
|     # * exclude `bootstrap_dir` from the search | ||||
|     # * find a user `sitecustomize.py` module | ||||
|     # * import that module via `imp` | ||||
|     bootstrap_dir = os.path.dirname(__file__) | ||||
|     path = list(sys.path) | ||||
| 
 | ||||
|     if bootstrap_dir in path: | ||||
|         path.remove(bootstrap_dir) | ||||
| 
 | ||||
|     try: | ||||
|         (f, path, description) = imp.find_module("sitecustomize", path) | ||||
|     except ImportError: | ||||
|         pass | ||||
|     else: | ||||
|         # `sitecustomize.py` found, load it | ||||
|         log.debug("sitecustomize from user found in: %s", path) | ||||
|         imp.load_module("sitecustomize", f, path, description) | ||||
| 
 | ||||
|     # Loading status used in tests to detect if the `sitecustomize` has been | ||||
|     # properly loaded without exceptions. This must be the last action in the module | ||||
|     # when the execution ends with a success. | ||||
|     loaded = True | ||||
| except Exception: | ||||
|     loaded = False | ||||
|     log.warning("error configuring Datadog tracing", exc_info=True) | ||||
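|  | ||||
| # For reference, a hedged sketch of the manual equivalent of what this module | ||||
| # derives from the environment (illustrative values, not defaults): | ||||
| # | ||||
| #     from ddtrace import tracer, patch_all | ||||
| #     tracer.configure(hostname='localhost', port=8126, priority_sampling=True) | ||||
| #     patch_all(**EXTRA_PATCHED_MODULES) | ||||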
|  | @ -0,0 +1,82 @@ | |||
| #!/usr/bin/env python | ||||
| from distutils import spawn | ||||
| import os | ||||
| import sys | ||||
| import logging | ||||
| 
 | ||||
| debug = os.environ.get('DATADOG_TRACE_DEBUG') | ||||
| if debug and debug.lower() == 'true': | ||||
|     logging.basicConfig(level=logging.DEBUG) | ||||
| 
 | ||||
| # Do not use `ddtrace.internal.logger.get_logger` here | ||||
| # DEV: It isn't really necessary to use `DDLogger` here, so we defer | ||||
| #        importing `ddtrace` until we actually need it. | ||||
| #      As well, no actual rate limiting would apply here since we only | ||||
| #        have a few logged lines | ||||
| log = logging.getLogger(__name__) | ||||
| 
 | ||||
| USAGE = """ | ||||
| Execute the given Python program after configuring it to emit Datadog traces. | ||||
| Append command line arguments to your program as usual. | ||||
| 
 | ||||
| Usage: [ENV_VARS] ddtrace-run <my_program> | ||||
| 
 | ||||
| Available environment variables: | ||||
| 
 | ||||
|     DATADOG_ENV : override an application's environment (no default) | ||||
|     DATADOG_TRACE_ENABLED=true|false : override the value of tracer.enabled (default: true) | ||||
|     DATADOG_TRACE_DEBUG=true|false : enable debug logging (default: false) | ||||
|     DATADOG_PATCH_MODULES=module:patch,module:patch... e.g. boto:true,redis:false : override the modules patched for this execution of the program (default: none) | ||||
|     DATADOG_TRACE_AGENT_HOSTNAME=localhost: override the address of the trace agent host that the default tracer will attempt to submit to (default: localhost) | ||||
|     DATADOG_TRACE_AGENT_PORT=8126: override the port that the default tracer will submit to (default: 8126) | ||||
|     DATADOG_SERVICE_NAME : override the service name to be used for this program (no default) | ||||
|                            This value is passed through when setting up middleware for web framework integrations. | ||||
|                            (e.g. pylons, flask, django) | ||||
|                            For tracing without a web integration, prefer setting the service name in code. | ||||
|     DATADOG_PRIORITY_SAMPLING=true|false : (default: false): enables Priority Sampling. | ||||
| """  # noqa: E501 | ||||
| 
 | ||||
| 
 | ||||
| def _ddtrace_root(): | ||||
|     from ddtrace import __file__ | ||||
|     return os.path.dirname(__file__) | ||||
| 
 | ||||
| 
 | ||||
| def _add_bootstrap_to_pythonpath(bootstrap_dir): | ||||
|     """ | ||||
|     Add our bootstrap directory to the head of $PYTHONPATH to ensure | ||||
|     it is loaded before program code | ||||
|     """ | ||||
|     python_path = os.environ.get('PYTHONPATH', '') | ||||
| 
 | ||||
|     if python_path: | ||||
|         new_path = '%s%s%s' % (bootstrap_dir, os.path.pathsep, os.environ['PYTHONPATH']) | ||||
|         os.environ['PYTHONPATH'] = new_path | ||||
|     else: | ||||
|         os.environ['PYTHONPATH'] = bootstrap_dir | ||||
| 
 | ||||
| 
 | ||||
| def main(): | ||||
|     if len(sys.argv) < 2 or sys.argv[1] == '-h': | ||||
|         print(USAGE) | ||||
|         return | ||||
| 
 | ||||
|     log.debug('sys.argv: %s', sys.argv) | ||||
| 
 | ||||
|     root_dir = _ddtrace_root() | ||||
|     log.debug('ddtrace root: %s', root_dir) | ||||
| 
 | ||||
|     bootstrap_dir = os.path.join(root_dir, 'bootstrap') | ||||
|     log.debug('ddtrace bootstrap: %s', bootstrap_dir) | ||||
| 
 | ||||
|     _add_bootstrap_to_pythonpath(bootstrap_dir) | ||||
|     log.debug('PYTHONPATH: %s', os.environ['PYTHONPATH']) | ||||
|     log.debug('sys.path: %s', sys.path) | ||||
| 
 | ||||
|     executable = sys.argv[1] | ||||
| 
 | ||||
|     # Find the executable path | ||||
|     executable = spawn.find_executable(executable) | ||||
|     log.debug('program executable: %s', executable) | ||||
| 
 | ||||
|     os.execl(executable, executable, *sys.argv[2:]) | ||||
|  | @ -0,0 +1,151 @@ | |||
| import platform | ||||
| import re | ||||
| import sys | ||||
| import textwrap | ||||
| 
 | ||||
| from ddtrace.vendor import six | ||||
| 
 | ||||
| __all__ = [ | ||||
|     'httplib', | ||||
|     'iteritems', | ||||
|     'PY2', | ||||
|     'Queue', | ||||
|     'stringify', | ||||
|     'StringIO', | ||||
|     'urlencode', | ||||
|     'parse', | ||||
|     'reraise', | ||||
| ] | ||||
| 
 | ||||
| PYTHON_VERSION_INFO = sys.version_info | ||||
| PY2 = sys.version_info[0] == 2 | ||||
| PY3 = sys.version_info[0] == 3 | ||||
| 
 | ||||
| # Infos about python passed to the trace agent through the header | ||||
| PYTHON_VERSION = platform.python_version() | ||||
| PYTHON_INTERPRETER = platform.python_implementation() | ||||
| 
 | ||||
| try: | ||||
|     StringIO = six.moves.cStringIO | ||||
| except ImportError: | ||||
|     StringIO = six.StringIO | ||||
| 
 | ||||
| httplib = six.moves.http_client | ||||
| urlencode = six.moves.urllib.parse.urlencode | ||||
| parse = six.moves.urllib.parse | ||||
| Queue = six.moves.queue.Queue | ||||
| iteritems = six.iteritems | ||||
| reraise = six.reraise | ||||
| reload_module = six.moves.reload_module | ||||
| 
 | ||||
| stringify = six.text_type | ||||
| string_type = six.string_types[0] | ||||
| msgpack_type = six.binary_type | ||||
| # DEV: `six` doesn't have `float` in `integer_types` | ||||
| numeric_types = six.integer_types + (float, ) | ||||
| 
 | ||||
| # Pattern class generated by `re.compile` | ||||
| if PYTHON_VERSION_INFO >= (3, 7): | ||||
|     pattern_type = re.Pattern | ||||
| else: | ||||
|     pattern_type = re._pattern_type | ||||
| 
 | ||||
| 
 | ||||
| def is_integer(obj): | ||||
|     """Helper to determine if the provided ``obj`` is an integer type or not""" | ||||
|     # DEV: We have to make sure it is an integer and not a boolean | ||||
|     # >>> type(True) | ||||
|     # <class 'bool'> | ||||
|     # >>> isinstance(True, int) | ||||
|     # True | ||||
|     return isinstance(obj, six.integer_types) and not isinstance(obj, bool) | ||||
| 
 | ||||
| 
 | ||||
| try: | ||||
|     from time import time_ns | ||||
| except ImportError: | ||||
|     from time import time as _time | ||||
| 
 | ||||
|     def time_ns(): | ||||
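|         # time() returns float seconds; 10e5 == 1e6, so this truncates to | ||||
|         # microsecond precision before scaling the result to nanoseconds. | ||||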
|         return int(_time() * 10e5) * 1000 | ||||
| 
 | ||||
| 
 | ||||
| if PYTHON_VERSION_INFO[0:2] >= (3, 4): | ||||
|     from asyncio import iscoroutinefunction | ||||
| 
 | ||||
|     # Execute from a string to get around syntax errors from `yield from` | ||||
|     # DEV: The idea to do this was stolen from `six` | ||||
|     #   https://github.com/benjaminp/six/blob/15e31431af97e5e64b80af0a3f598d382bcdd49a/six.py#L719-L737 | ||||
|     six.exec_(textwrap.dedent(""" | ||||
|     import functools | ||||
|     import asyncio | ||||
| 
 | ||||
| 
 | ||||
|     def make_async_decorator(tracer, coro, *params, **kw_params): | ||||
|         \"\"\" | ||||
|         Decorator factory that creates an asynchronous wrapper that yields | ||||
|         a coroutine result. This factory is required to handle Python 2 | ||||
|         compatibilities. | ||||
| 
 | ||||
|         :param object tracer: the tracer instance that is used | ||||
|         :param function coro: the coroutine that must be executed | ||||
|         :param tuple params: arguments given to the Tracer.trace() | ||||
|         :param dict kw_params: keyword arguments given to the Tracer.trace() | ||||
|         \"\"\" | ||||
|         @functools.wraps(coro) | ||||
|         @asyncio.coroutine | ||||
|         def func_wrapper(*args, **kwargs): | ||||
|             with tracer.trace(*params, **kw_params): | ||||
|                 result = yield from coro(*args, **kwargs)  # noqa: E999 | ||||
|                 return result | ||||
| 
 | ||||
|         return func_wrapper | ||||
|     """)) | ||||
| 
 | ||||
| else: | ||||
|     # asyncio is missing so we can't have coroutines; these | ||||
|     # functions are used only to ensure code execution in case | ||||
|     # of unexpected behavior | ||||
|     def iscoroutinefunction(fn): | ||||
|         return False | ||||
| 
 | ||||
|     def make_async_decorator(tracer, fn, *params, **kw_params): | ||||
|         return fn | ||||
| 
 | ||||
| 
 | ||||
| # DEV: There is `six.u()` which does something similar, but doesn't have the guard around `hasattr(s, 'decode')` | ||||
| def to_unicode(s): | ||||
|     """ Return a unicode string for the given bytes or string instance. """ | ||||
|     # No reason to decode if we already have the unicode compatible object we expect | ||||
|     # DEV: `six.text_type` will be a `str` for python 3 and `unicode` for python 2 | ||||
|     # DEV: Double decoding a `unicode` can cause a `UnicodeEncodeError` | ||||
|     #   e.g. `'\xc3\xbf'.decode('utf-8').decode('utf-8')` | ||||
|     if isinstance(s, six.text_type): | ||||
|         return s | ||||
| 
 | ||||
|     # If the object has a `decode` method, then decode into `utf-8` | ||||
|     #   e.g. Python 2 `str`, Python 2/3 `bytearray`, etc | ||||
|     if hasattr(s, 'decode'): | ||||
|         return s.decode('utf-8') | ||||
| 
 | ||||
|     # Always try to coerce the object into the `six.text_type` object we expect | ||||
|     #   e.g. `to_unicode(1)`, `to_unicode(dict(key='value'))` | ||||
|     return six.text_type(s) | ||||
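| # Illustrative examples (assuming utf-8 payloads): | ||||
| #   to_unicode(b'caf\xc3\xa9')  ->  u'café' | ||||
| #   to_unicode(u'café')         ->  u'café' (returned unchanged) | ||||
| #   to_unicode(42)              ->  u'42'   (coerced via six.text_type) | ||||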
| 
 | ||||
| 
 | ||||
| def get_connection_response(conn): | ||||
|     """Returns the response for a connection. | ||||
| 
 | ||||
|     If using Python 2, enable buffering. | ||||
| 
 | ||||
|     Python 2 does not enable buffering by default, resulting in many recv | ||||
|     syscalls. | ||||
| 
 | ||||
|     See: | ||||
|     https://bugs.python.org/issue4879 | ||||
|     https://github.com/python/cpython/commit/3c43fcba8b67ea0cec4a443c755ce5f25990a6cf | ||||
|     """ | ||||
|     if PY2: | ||||
|         return conn.getresponse(buffering=True) | ||||
|     else: | ||||
|         return conn.getresponse() | ||||
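| # Hypothetical usage with the stdlib HTTP client (via `six.moves.http_client`): | ||||
| #   conn = six.moves.http_client.HTTPConnection('localhost', 8126) | ||||
| #   conn.request('GET', '/info') | ||||
| #   resp = get_connection_response(conn) | ||||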
|  | @ -0,0 +1,15 @@ | |||
| FILTERS_KEY = 'FILTERS' | ||||
| SAMPLE_RATE_METRIC_KEY = '_sample_rate' | ||||
| SAMPLING_PRIORITY_KEY = '_sampling_priority_v1' | ||||
| ANALYTICS_SAMPLE_RATE_KEY = '_dd1.sr.eausr' | ||||
| SAMPLING_AGENT_DECISION = '_dd.agent_psr' | ||||
| SAMPLING_RULE_DECISION = '_dd.rule_psr' | ||||
| SAMPLING_LIMIT_DECISION = '_dd.limit_psr' | ||||
| ORIGIN_KEY = '_dd.origin' | ||||
| HOSTNAME_KEY = '_dd.hostname' | ||||
| ENV_KEY = 'env' | ||||
| 
 | ||||
| NUMERIC_TAGS = (ANALYTICS_SAMPLE_RATE_KEY, ) | ||||
| 
 | ||||
| MANUAL_DROP_KEY = 'manual.drop' | ||||
| MANUAL_KEEP_KEY = 'manual.keep' | ||||
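| # Hypothetical usage sketch: these keys are meant to be set as span tags or | ||||
| # metrics, e.g. | ||||
| #   span.set_tag(MANUAL_KEEP_KEY, True)           # ask to keep this trace | ||||
| #   span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, 1.0)  # as done by the integrations below | ||||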
|  | @ -0,0 +1,216 @@ | |||
| import logging | ||||
| import threading | ||||
| 
 | ||||
| from .constants import HOSTNAME_KEY, SAMPLING_PRIORITY_KEY, ORIGIN_KEY | ||||
| from .internal.logger import get_logger | ||||
| from .internal import hostname | ||||
| from .settings import config | ||||
| from .utils.formats import asbool, get_env | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| 
 | ||||
| class Context(object): | ||||
|     """ | ||||
|     Context is used to keep track of a hierarchy of spans for the current | ||||
|     execution flow. During each logical execution, the same ``Context`` is | ||||
|     used to represent a single logical trace, even if the trace is built | ||||
|     asynchronously. | ||||
| 
 | ||||
|     A single code execution may use multiple ``Context`` objects if part of the | ||||
|     execution must not be related to the current tracing. As an example, a | ||||
|     delayed job may compose a standalone trace instead of being related to the | ||||
|     same trace that generated the job itself. On the other hand, if it's part | ||||
|     of the same ``Context``, it will be related to the original trace. | ||||
| 
 | ||||
|     This data structure is thread-safe. | ||||
|     """ | ||||
|     _partial_flush_enabled = asbool(get_env('tracer', 'partial_flush_enabled', 'false')) | ||||
|     _partial_flush_min_spans = int(get_env('tracer', 'partial_flush_min_spans', 500)) | ||||
| 
 | ||||
|     def __init__(self, trace_id=None, span_id=None, sampling_priority=None, _dd_origin=None): | ||||
|         """ | ||||
|         Initialize a new thread-safe ``Context``. | ||||
| 
 | ||||
|         :param int trace_id: trace_id of parent span | ||||
|         :param int span_id: span_id of parent span | ||||
|         """ | ||||
|         self._trace = [] | ||||
|         self._finished_spans = 0 | ||||
|         self._current_span = None | ||||
|         self._lock = threading.Lock() | ||||
| 
 | ||||
|         self._parent_trace_id = trace_id | ||||
|         self._parent_span_id = span_id | ||||
|         self._sampling_priority = sampling_priority | ||||
|         self._dd_origin = _dd_origin | ||||
| 
 | ||||
|     @property | ||||
|     def trace_id(self): | ||||
|         """Return current context trace_id.""" | ||||
|         with self._lock: | ||||
|             return self._parent_trace_id | ||||
| 
 | ||||
|     @property | ||||
|     def span_id(self): | ||||
|         """Return current context span_id.""" | ||||
|         with self._lock: | ||||
|             return self._parent_span_id | ||||
| 
 | ||||
|     @property | ||||
|     def sampling_priority(self): | ||||
|         """Return current context sampling priority.""" | ||||
|         with self._lock: | ||||
|             return self._sampling_priority | ||||
| 
 | ||||
|     @sampling_priority.setter | ||||
|     def sampling_priority(self, value): | ||||
|         """Set sampling priority.""" | ||||
|         with self._lock: | ||||
|             self._sampling_priority = value | ||||
| 
 | ||||
|     def clone(self): | ||||
|         """ | ||||
|         Partially clones the current context. | ||||
|         It copies everything EXCEPT the registered and finished spans. | ||||
|         """ | ||||
|         with self._lock: | ||||
|             new_ctx = Context( | ||||
|                 trace_id=self._parent_trace_id, | ||||
|                 span_id=self._parent_span_id, | ||||
|                 sampling_priority=self._sampling_priority, | ||||
|             ) | ||||
|             new_ctx._current_span = self._current_span | ||||
|             return new_ctx | ||||
| 
 | ||||
|     def get_current_root_span(self): | ||||
|         """ | ||||
|         Return the root span of the context or None if it does not exist. | ||||
|         """ | ||||
|         return self._trace[0] if len(self._trace) > 0 else None | ||||
| 
 | ||||
|     def get_current_span(self): | ||||
|         """ | ||||
|         Return the last active span that corresponds to the last inserted | ||||
|         item in the trace list. This cannot be considered the current active | ||||
|         span in asynchronous environments, because some spans can be closed | ||||
|         earlier while child spans still need to finish their traced execution. | ||||
|         """ | ||||
|         with self._lock: | ||||
|             return self._current_span | ||||
| 
 | ||||
|     def _set_current_span(self, span): | ||||
|         """ | ||||
|         Set current span internally. | ||||
| 
 | ||||
|         Not thread-safe unless used with a lock. For internal Context usage only. | ||||
|         """ | ||||
|         self._current_span = span | ||||
|         if span: | ||||
|             self._parent_trace_id = span.trace_id | ||||
|             self._parent_span_id = span.span_id | ||||
|         else: | ||||
|             self._parent_span_id = None | ||||
| 
 | ||||
|     def add_span(self, span): | ||||
|         """ | ||||
|         Add a span to the context trace list, keeping it as the last active span. | ||||
|         """ | ||||
|         with self._lock: | ||||
|             self._set_current_span(span) | ||||
| 
 | ||||
|             self._trace.append(span) | ||||
|             span._context = self | ||||
| 
 | ||||
|     def close_span(self, span): | ||||
|         """ | ||||
|         Mark a span as finished, increasing the internal counter to prevent | ||||
|         cycles inside the _trace list. | ||||
|         """ | ||||
|         with self._lock: | ||||
|             self._finished_spans += 1 | ||||
|             self._set_current_span(span._parent) | ||||
| 
 | ||||
|             # notify if the trace is not closed properly; this check is executed only | ||||
|             # if debug logging is enabled and when the root span is closed | ||||
|             # for an unfinished trace. This logging is meant for debugging | ||||
|             # purposes, and it doesn't mean that the trace was wrongly generated. | ||||
|             # In asynchronous environments, it's legit to close the root span before | ||||
|             # some children. On the other hand, asynchronous web frameworks still expect | ||||
|             # to close the root span after all the children. | ||||
|             if span.tracer and span.tracer.log.isEnabledFor(logging.DEBUG) and span._parent is None: | ||||
|                 unfinished_spans = [x for x in self._trace if not x.finished] | ||||
|                 if unfinished_spans: | ||||
|                     log.debug('Root span "%s" closed, but the trace has %d unfinished spans:', | ||||
|                               span.name, len(unfinished_spans)) | ||||
|                     for wrong_span in unfinished_spans: | ||||
|                         log.debug('\n%s', wrong_span.pprint()) | ||||
| 
 | ||||
|     def _is_sampled(self): | ||||
|         return any(span.sampled for span in self._trace) | ||||
| 
 | ||||
|     def get(self): | ||||
|         """ | ||||
|         Returns a tuple containing the trace list generated in the current context and | ||||
|         whether the context is sampled. It returns (None, None) if the ``Context`` is | ||||
|         not finished. If a trace is returned, the ``Context`` will be reset so that it | ||||
|         can be re-used immediately. | ||||
| 
 | ||||
|         This operation is thread-safe. | ||||
|         """ | ||||
|         with self._lock: | ||||
|             # All spans are finished? | ||||
|             if self._finished_spans == len(self._trace): | ||||
|                 # get the trace | ||||
|                 trace = self._trace | ||||
|                 sampled = self._is_sampled() | ||||
|                 sampling_priority = self._sampling_priority | ||||
|                 # attach the sampling priority to the context root span | ||||
|                 if sampled and sampling_priority is not None and trace: | ||||
|                     trace[0].set_metric(SAMPLING_PRIORITY_KEY, sampling_priority) | ||||
|                 origin = self._dd_origin | ||||
|                 # attach the origin to the root span tag | ||||
|                 if sampled and origin is not None and trace: | ||||
|                     trace[0].set_tag(ORIGIN_KEY, origin) | ||||
| 
 | ||||
|                 # Set hostname tag if it was requested | ||||
|                 if config.report_hostname: | ||||
|                     # DEV: `get_hostname()` value is cached | ||||
|                     trace[0].set_tag(HOSTNAME_KEY, hostname.get_hostname()) | ||||
| 
 | ||||
|                 # clean the current state | ||||
|                 self._trace = [] | ||||
|                 self._finished_spans = 0 | ||||
|                 self._parent_trace_id = None | ||||
|                 self._parent_span_id = None | ||||
|                 self._sampling_priority = None | ||||
|                 return trace, sampled | ||||
| 
 | ||||
|             elif self._partial_flush_enabled: | ||||
|                 finished_spans = [t for t in self._trace if t.finished] | ||||
|                 if len(finished_spans) >= self._partial_flush_min_spans: | ||||
|                     # partial flush when enabled and we have more than the minimal required spans | ||||
|                     trace = self._trace | ||||
|                     sampled = self._is_sampled() | ||||
|                     sampling_priority = self._sampling_priority | ||||
|                     # attach the sampling priority to the context root span | ||||
|                     if sampled and sampling_priority is not None and trace: | ||||
|                         trace[0].set_metric(SAMPLING_PRIORITY_KEY, sampling_priority) | ||||
|                     origin = self._dd_origin | ||||
|                     # attach the origin to the root span tag | ||||
|                     if sampled and origin is not None and trace: | ||||
|                         trace[0].set_tag(ORIGIN_KEY, origin) | ||||
| 
 | ||||
|                     # Set hostname tag if it was requested | ||||
|                     if config.report_hostname: | ||||
|                         # DEV: `get_hostname()` value is cached | ||||
|                         trace[0].set_tag(HOSTNAME_KEY, hostname.get_hostname()) | ||||
| 
 | ||||
|                     self._finished_spans = 0 | ||||
| 
 | ||||
|                     # Any open spans will remain as `self._trace` | ||||
|                     # Any finished spans will get returned to be flushed | ||||
|                     self._trace = [t for t in self._trace if not t.finished] | ||||
| 
 | ||||
|                     return finished_spans, sampled | ||||
|             return None, None | ||||
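| # A minimal lifecycle sketch (hypothetical; assumes a span object exposing the | ||||
| # attributes used above, e.g. `trace_id`, `_parent` and `finished`): | ||||
| #   ctx = Context() | ||||
| #   ctx.add_span(root_span)       # becomes the current span | ||||
| #   ctx.close_span(root_span)     # once every registered span is closed ... | ||||
| #   trace, sampled = ctx.get()    # ... the trace is returned and the context | ||||
| #                                 # resets itself for immediate re-use | ||||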
|  | @ -0,0 +1 @@ | |||
| from ..utils.importlib import func_name, module_name, require_modules  # noqa | ||||
|  | @ -0,0 +1,30 @@ | |||
| """ | ||||
| The aiobotocore integration will trace all AWS calls made with the ``aiobotocore`` | ||||
| library. This integration isn't enabled when applying the default patching. | ||||
| To enable it, you must run ``patch_all(aiobotocore=True)`` | ||||
| 
 | ||||
| :: | ||||
| 
 | ||||
|     import aiobotocore.session | ||||
|     from ddtrace import patch | ||||
| 
 | ||||
|     # If not patched yet, you can patch aiobotocore specifically | ||||
|     patch(aiobotocore=True) | ||||
| 
 | ||||
|     # This will report spans with the default instrumentation | ||||
|     session = aiobotocore.session.get_session() | ||||
|     lambda_client = session.create_client('lambda', region_name='us-east-1') | ||||
| 
 | ||||
|     # This query generates a trace | ||||
|     lambda_client.list_functions() | ||||
| """ | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| 
 | ||||
| required_modules = ['aiobotocore.client'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .patch import patch | ||||
| 
 | ||||
|         __all__ = ['patch'] | ||||
|  | @ -0,0 +1,129 @@ | |||
| import asyncio | ||||
| from ddtrace.vendor import wrapt | ||||
| from ddtrace import config | ||||
| import aiobotocore.client | ||||
| 
 | ||||
| from aiobotocore.endpoint import ClientResponseContentProxy | ||||
| 
 | ||||
| from ...constants import ANALYTICS_SAMPLE_RATE_KEY | ||||
| from ...pin import Pin | ||||
| from ...ext import SpanTypes, http, aws | ||||
| from ...compat import PYTHON_VERSION_INFO | ||||
| from ...utils.formats import deep_getattr | ||||
| from ...utils.wrappers import unwrap | ||||
| 
 | ||||
| 
 | ||||
| ARGS_NAME = ('action', 'params', 'path', 'verb') | ||||
| TRACED_ARGS = ['params', 'path', 'verb'] | ||||
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     if getattr(aiobotocore.client, '_datadog_patch', False): | ||||
|         return | ||||
|     setattr(aiobotocore.client, '_datadog_patch', True) | ||||
| 
 | ||||
|     wrapt.wrap_function_wrapper('aiobotocore.client', 'AioBaseClient._make_api_call', _wrapped_api_call) | ||||
|     Pin(service='aws', app='aws').onto(aiobotocore.client.AioBaseClient) | ||||
| 
 | ||||
| 
 | ||||
| def unpatch(): | ||||
|     if getattr(aiobotocore.client, '_datadog_patch', False): | ||||
|         setattr(aiobotocore.client, '_datadog_patch', False) | ||||
|         unwrap(aiobotocore.client.AioBaseClient, '_make_api_call') | ||||
| 
 | ||||
| 
 | ||||
| class WrappedClientResponseContentProxy(wrapt.ObjectProxy): | ||||
|     def __init__(self, body, pin, parent_span): | ||||
|         super(WrappedClientResponseContentProxy, self).__init__(body) | ||||
|         self._self_pin = pin | ||||
|         self._self_parent_span = parent_span | ||||
| 
 | ||||
|     @asyncio.coroutine | ||||
|     def read(self, *args, **kwargs): | ||||
|         # async read that must be a child of the parent span operation | ||||
|         operation_name = '{}.read'.format(self._self_parent_span.name) | ||||
| 
 | ||||
|         with self._self_pin.tracer.start_span(operation_name, child_of=self._self_parent_span) as span: | ||||
|             # inherit parent attributes | ||||
|             span.resource = self._self_parent_span.resource | ||||
|             span.span_type = self._self_parent_span.span_type | ||||
|             span.meta = dict(self._self_parent_span.meta) | ||||
|             span.metrics = dict(self._self_parent_span.metrics) | ||||
| 
 | ||||
|             result = yield from self.__wrapped__.read(*args, **kwargs) | ||||
|             span.set_tag('Length', len(result)) | ||||
| 
 | ||||
|         return result | ||||
| 
 | ||||
|     # wrapt doesn't proxy `async with` context managers | ||||
|     if PYTHON_VERSION_INFO >= (3, 5, 0): | ||||
|         @asyncio.coroutine | ||||
|         def __aenter__(self): | ||||
|             # call the wrapped method but return the object proxy | ||||
|             yield from self.__wrapped__.__aenter__() | ||||
|             return self | ||||
| 
 | ||||
|         @asyncio.coroutine | ||||
|         def __aexit__(self, *args, **kwargs): | ||||
|             response = yield from self.__wrapped__.__aexit__(*args, **kwargs) | ||||
|             return response | ||||
| 
 | ||||
| 
 | ||||
| @asyncio.coroutine | ||||
| def _wrapped_api_call(original_func, instance, args, kwargs): | ||||
|     pin = Pin.get_from(instance) | ||||
|     if not pin or not pin.enabled(): | ||||
|         result = yield from original_func(*args, **kwargs) | ||||
|         return result | ||||
| 
 | ||||
|     endpoint_name = deep_getattr(instance, '_endpoint._endpoint_prefix') | ||||
| 
 | ||||
|     with pin.tracer.trace('{}.command'.format(endpoint_name), | ||||
|                           service='{}.{}'.format(pin.service, endpoint_name), | ||||
|                           span_type=SpanTypes.HTTP) as span: | ||||
| 
 | ||||
|         if len(args) > 0: | ||||
|             operation = args[0] | ||||
|             span.resource = '{}.{}'.format(endpoint_name, operation.lower()) | ||||
|         else: | ||||
|             operation = None | ||||
|             span.resource = endpoint_name | ||||
| 
 | ||||
|         aws.add_span_arg_tags(span, endpoint_name, args, ARGS_NAME, TRACED_ARGS) | ||||
| 
 | ||||
|         region_name = deep_getattr(instance, 'meta.region_name') | ||||
| 
 | ||||
|         meta = { | ||||
|             'aws.agent': 'aiobotocore', | ||||
|             'aws.operation': operation, | ||||
|             'aws.region': region_name, | ||||
|         } | ||||
|         span.set_tags(meta) | ||||
| 
 | ||||
|         result = yield from original_func(*args, **kwargs) | ||||
| 
 | ||||
|         body = result.get('Body') | ||||
|         if isinstance(body, ClientResponseContentProxy): | ||||
|             result['Body'] = WrappedClientResponseContentProxy(body, pin, span) | ||||
| 
 | ||||
|         response_meta = result['ResponseMetadata'] | ||||
|         response_headers = response_meta['HTTPHeaders'] | ||||
| 
 | ||||
|         span.set_tag(http.STATUS_CODE, response_meta['HTTPStatusCode']) | ||||
|         span.set_tag('retry_attempts', response_meta['RetryAttempts']) | ||||
| 
 | ||||
|         request_id = response_meta.get('RequestId') | ||||
|         if request_id: | ||||
|             span.set_tag('aws.requestid', request_id) | ||||
| 
 | ||||
|         request_id2 = response_headers.get('x-amz-id-2') | ||||
|         if request_id2: | ||||
|             span.set_tag('aws.requestid2', request_id2) | ||||
| 
 | ||||
|         # set analytics sample rate | ||||
|         span.set_tag( | ||||
|             ANALYTICS_SAMPLE_RATE_KEY, | ||||
|             config.aiobotocore.get_analytics_sample_rate() | ||||
|         ) | ||||
| 
 | ||||
|         return result | ||||
|  | @ -0,0 +1,61 @@ | |||
| """ | ||||
| The ``aiohttp`` integration traces all requests defined in the application handlers. | ||||
| Auto instrumentation is available using the ``trace_app`` function:: | ||||
| 
 | ||||
|     from aiohttp import web | ||||
|     from ddtrace import tracer, patch | ||||
|     from ddtrace.contrib.aiohttp import trace_app | ||||
| 
 | ||||
|     # patch third-party modules like aiohttp_jinja2 | ||||
|     patch(aiohttp=True) | ||||
| 
 | ||||
|     # create your application | ||||
|     app = web.Application() | ||||
|     app.router.add_get('/', home_handler) | ||||
| 
 | ||||
|     # trace your application handlers | ||||
|     trace_app(app, tracer, service='async-api') | ||||
|     web.run_app(app, port=8000) | ||||
| 
 | ||||
| Integration settings are attached to your application under the ``datadog_trace`` | ||||
| namespace. You can read or update them as follows:: | ||||
| 
 | ||||
|     # disables distributed tracing for all received requests | ||||
|     app['datadog_trace']['distributed_tracing_enabled'] = False | ||||
| 
 | ||||
| Available settings are: | ||||
| 
 | ||||
| * ``tracer`` (default: ``ddtrace.tracer``): set the tracer instance that is used to | ||||
|   trace `aiohttp` internals. | ||||
| * ``service`` (default: ``aiohttp-web``): set the service name used by the tracer. Usually | ||||
|   this configuration should be updated with a meaningful name. | ||||
| * ``distributed_tracing_enabled`` (default: ``True``): enable distributed tracing during | ||||
|   the middleware execution, so that a new span is created with the given ``trace_id`` and | ||||
|   ``parent_id`` injected via request headers. | ||||
| * ``analytics_enabled`` (default: ``None``): enables APM events in Trace Search & Analytics. | ||||
| 
 | ||||
| Third-party modules that are currently supported by the ``patch()`` method are: | ||||
| 
 | ||||
| * ``aiohttp_jinja2`` | ||||
| 
 | ||||
| When a request span is created, a new ``Context`` for this logical execution is attached | ||||
| to the ``request`` object, so that it can be used in the application code:: | ||||
| 
 | ||||
|     async def home_handler(request): | ||||
|         ctx = request['datadog_context'] | ||||
|         # do something with the tracing Context | ||||
| """ | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| required_modules = ['aiohttp'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .patch import patch, unpatch | ||||
|         from .middlewares import trace_app | ||||
| 
 | ||||
|         __all__ = [ | ||||
|             'patch', | ||||
|             'unpatch', | ||||
|             'trace_app', | ||||
|         ] | ||||
|  | @ -0,0 +1,146 @@ | |||
| import asyncio | ||||
| 
 | ||||
| from ..asyncio import context_provider | ||||
| from ...compat import stringify | ||||
| from ...constants import ANALYTICS_SAMPLE_RATE_KEY | ||||
| from ...ext import SpanTypes, http | ||||
| from ...propagation.http import HTTPPropagator | ||||
| from ...settings import config | ||||
| 
 | ||||
| 
 | ||||
| CONFIG_KEY = 'datadog_trace' | ||||
| REQUEST_CONTEXT_KEY = 'datadog_context' | ||||
| REQUEST_CONFIG_KEY = '__datadog_trace_config' | ||||
| REQUEST_SPAN_KEY = '__datadog_request_span' | ||||
| 
 | ||||
| 
 | ||||
| @asyncio.coroutine | ||||
| def trace_middleware(app, handler): | ||||
|     """ | ||||
|     ``aiohttp`` middleware that traces the handler execution. | ||||
|     Because handlers are run in different tasks for each request, we attach the Context | ||||
|     instance both to the Task and to the Request objects. In this way: | ||||
| 
 | ||||
|     * the Task is used by the internal automatic instrumentation | ||||
|     * the ``Context`` attached to the request can be freely used in the application code | ||||
|     """ | ||||
|     @asyncio.coroutine | ||||
|     def attach_context(request): | ||||
|         # application configs | ||||
|         tracer = app[CONFIG_KEY]['tracer'] | ||||
|         service = app[CONFIG_KEY]['service'] | ||||
|         distributed_tracing = app[CONFIG_KEY]['distributed_tracing_enabled'] | ||||
| 
 | ||||
|         # Create a new context based on the propagated information. | ||||
|         if distributed_tracing: | ||||
|             propagator = HTTPPropagator() | ||||
|             context = propagator.extract(request.headers) | ||||
|             # Only need to activate the new context if something was propagated | ||||
|             if context.trace_id: | ||||
|                 tracer.context_provider.activate(context) | ||||
| 
 | ||||
|         # trace the handler | ||||
|         request_span = tracer.trace( | ||||
|             'aiohttp.request', | ||||
|             service=service, | ||||
|             span_type=SpanTypes.WEB, | ||||
|         ) | ||||
| 
 | ||||
|         # Configure trace search sample rate | ||||
|         # DEV: aiohttp is a special case that maintains separate configuration from the config API | ||||
|         analytics_enabled = app[CONFIG_KEY]['analytics_enabled'] | ||||
|         if (config.analytics_enabled and analytics_enabled is not False) or analytics_enabled is True: | ||||
|             request_span.set_tag( | ||||
|                 ANALYTICS_SAMPLE_RATE_KEY, | ||||
|                 app[CONFIG_KEY].get('analytics_sample_rate', True) | ||||
|             ) | ||||
| 
 | ||||
|         # attach the context and the root span to the request; the Context | ||||
|         # may be freely used by the application code | ||||
|         request[REQUEST_CONTEXT_KEY] = request_span.context | ||||
|         request[REQUEST_SPAN_KEY] = request_span | ||||
|         request[REQUEST_CONFIG_KEY] = app[CONFIG_KEY] | ||||
|         try: | ||||
|             response = yield from handler(request) | ||||
|             return response | ||||
|         except Exception: | ||||
|             request_span.set_traceback() | ||||
|             raise | ||||
|     return attach_context | ||||
| 
 | ||||
| 
 | ||||
| @asyncio.coroutine | ||||
| def on_prepare(request, response): | ||||
|     """ | ||||
|     The on_prepare signal is used to close the request span that is created during | ||||
|     the trace middleware execution. | ||||
|     """ | ||||
|     # safe-guard: discard if we don't have a request span | ||||
|     request_span = request.get(REQUEST_SPAN_KEY, None) | ||||
|     if not request_span: | ||||
|         return | ||||
| 
 | ||||
|     # default resource name | ||||
|     resource = stringify(response.status) | ||||
| 
 | ||||
|     if request.match_info.route.resource: | ||||
|         # collect the resource name based on http resource type | ||||
|         res_info = request.match_info.route.resource.get_info() | ||||
| 
 | ||||
|         if res_info.get('path'): | ||||
|             resource = res_info.get('path') | ||||
|         elif res_info.get('formatter'): | ||||
|             resource = res_info.get('formatter') | ||||
|         elif res_info.get('prefix'): | ||||
|             resource = res_info.get('prefix') | ||||
| 
 | ||||
|         # prefix the resource name by the http method | ||||
|         resource = '{} {}'.format(request.method, resource) | ||||
| 
 | ||||
|     if 500 <= response.status < 600: | ||||
|         request_span.error = 1 | ||||
| 
 | ||||
|     request_span.resource = resource | ||||
|     request_span.set_tag('http.method', request.method) | ||||
|     request_span.set_tag('http.status_code', response.status) | ||||
|     request_span.set_tag(http.URL, request.url.with_query(None)) | ||||
|     # DEV: aiohttp is a special case that maintains separate configuration from the config API | ||||
|     trace_query_string = request[REQUEST_CONFIG_KEY].get('trace_query_string') | ||||
|     if trace_query_string is None: | ||||
|         trace_query_string = config._http.trace_query_string | ||||
|     if trace_query_string: | ||||
|         request_span.set_tag(http.QUERY_STRING, request.query_string) | ||||
|     request_span.finish() | ||||
| 
 | ||||
| 
 | ||||
| def trace_app(app, tracer, service='aiohttp-web'): | ||||
|     """ | ||||
|     Tracing function that patches the ``aiohttp`` application so that it will be | ||||
|     traced using the given ``tracer``. | ||||
| 
 | ||||
|     :param app: aiohttp application to trace | ||||
|     :param tracer: tracer instance to use | ||||
|     :param service: service name of tracer | ||||
|     """ | ||||
| 
 | ||||
|     # safe-guard: don't trace an application twice | ||||
|     if getattr(app, '__datadog_trace', False): | ||||
|         return | ||||
|     setattr(app, '__datadog_trace', True) | ||||
| 
 | ||||
|     # configure datadog settings | ||||
|     app[CONFIG_KEY] = { | ||||
|         'tracer': tracer, | ||||
|         'service': service, | ||||
|         'distributed_tracing_enabled': True, | ||||
|         'analytics_enabled': None, | ||||
|         'analytics_sample_rate': 1.0, | ||||
|     } | ||||
| 
 | ||||
|     # the tracer must work with asynchronous Context propagation | ||||
|     tracer.configure(context_provider=context_provider) | ||||
| 
 | ||||
|     # add the async tracer middleware as a first middleware | ||||
|     # and be sure that the on_prepare signal is the last one | ||||
|     app.middlewares.insert(0, trace_middleware) | ||||
|     app.on_response_prepare.append(on_prepare) | ||||
|  | @ -0,0 +1,39 @@ | |||
| from ddtrace.vendor import wrapt | ||||
| 
 | ||||
| from ...pin import Pin | ||||
| from ...utils.wrappers import unwrap | ||||
| 
 | ||||
| 
 | ||||
| try: | ||||
|     # instrument external packages only if they're available | ||||
|     import aiohttp_jinja2 | ||||
|     from .template import _trace_render_template | ||||
| 
 | ||||
|     template_module = True | ||||
| except ImportError: | ||||
|     template_module = False | ||||
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     """ | ||||
|     Patch aiohttp third party modules: | ||||
|         * aiohttp_jinja2 | ||||
|     """ | ||||
|     if template_module: | ||||
|         if getattr(aiohttp_jinja2, '__datadog_patch', False): | ||||
|             return | ||||
|         setattr(aiohttp_jinja2, '__datadog_patch', True) | ||||
| 
 | ||||
|         _w = wrapt.wrap_function_wrapper | ||||
|         _w('aiohttp_jinja2', 'render_template', _trace_render_template) | ||||
|         Pin(app='aiohttp', service=None).onto(aiohttp_jinja2) | ||||
| 
 | ||||
| 
 | ||||
| def unpatch(): | ||||
|     """ | ||||
|     Remove tracing from patched modules. | ||||
|     """ | ||||
|     if template_module: | ||||
|         if getattr(aiohttp_jinja2, '__datadog_patch', False): | ||||
|             setattr(aiohttp_jinja2, '__datadog_patch', False) | ||||
|             unwrap(aiohttp_jinja2, 'render_template') | ||||
|  | @ -0,0 +1,29 @@ | |||
| import aiohttp_jinja2 | ||||
| 
 | ||||
| from ddtrace import Pin | ||||
| 
 | ||||
| from ...ext import SpanTypes | ||||
| 
 | ||||
| 
 | ||||
| def _trace_render_template(func, module, args, kwargs): | ||||
|     """ | ||||
|     Trace the template rendering | ||||
|     """ | ||||
|     # get the module pin | ||||
|     pin = Pin.get_from(aiohttp_jinja2) | ||||
|     if not pin or not pin.enabled(): | ||||
|         return func(*args, **kwargs) | ||||
| 
 | ||||
|     # original signature: | ||||
|     # render_template(template_name, request, context, *, app_key=APP_KEY, encoding='utf-8') | ||||
|     template_name = args[0] | ||||
|     request = args[1] | ||||
|     env = aiohttp_jinja2.get_env(request.app) | ||||
| 
 | ||||
|     # the prefix is available only on PackageLoader | ||||
|     template_prefix = getattr(env.loader, 'package_path', '') | ||||
|     template_meta = '{}/{}'.format(template_prefix, template_name) | ||||
| 
 | ||||
|     with pin.tracer.trace('aiohttp.template', span_type=SpanTypes.TEMPLATE) as span: | ||||
|         span.set_meta('aiohttp.template', template_meta) | ||||
|         return func(*args, **kwargs) | ||||
|  | @ -0,0 +1,27 @@ | |||
| """ | ||||
| Instrument aiopg to report a span for each executed Postgres query:: | ||||
| 
 | ||||
|     from ddtrace import Pin, patch | ||||
|     import aiopg | ||||
| 
 | ||||
|     # If not patched yet, you can patch aiopg specifically | ||||
|     patch(aiopg=True) | ||||
| 
 | ||||
|     # This will report a span with the default settings | ||||
|     async with aiopg.connect(DSN) as db: | ||||
|         with (await db.cursor()) as cursor: | ||||
|             await cursor.execute("SELECT * FROM users WHERE id = 1") | ||||
| 
 | ||||
|     # Use a pin to specify metadata related to this connection | ||||
|     Pin.override(db, service='postgres-users') | ||||
| """ | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| 
 | ||||
| required_modules = ['aiopg'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .patch import patch | ||||
| 
 | ||||
|         __all__ = ['patch'] | ||||
|  | @ -0,0 +1,97 @@ | |||
| import asyncio | ||||
| from ddtrace.vendor import wrapt | ||||
| 
 | ||||
| from aiopg.utils import _ContextManager | ||||
| 
 | ||||
| from .. import dbapi | ||||
| from ...constants import ANALYTICS_SAMPLE_RATE_KEY | ||||
| from ...ext import SpanTypes, sql | ||||
| from ...pin import Pin | ||||
| from ...settings import config | ||||
| 
 | ||||
| 
 | ||||
| class AIOTracedCursor(wrapt.ObjectProxy): | ||||
|     """ TracedCursor wraps a psql cursor and traces its queries. """ | ||||
| 
 | ||||
|     def __init__(self, cursor, pin): | ||||
|         super(AIOTracedCursor, self).__init__(cursor) | ||||
|         pin.onto(self) | ||||
|         name = pin.app or 'sql' | ||||
|         self._datadog_name = '%s.query' % name | ||||
| 
 | ||||
|     @asyncio.coroutine | ||||
|     def _trace_method(self, method, resource, extra_tags, *args, **kwargs): | ||||
|         pin = Pin.get_from(self) | ||||
|         if not pin or not pin.enabled(): | ||||
|             result = yield from method(*args, **kwargs) | ||||
|             return result | ||||
|         service = pin.service | ||||
| 
 | ||||
|         with pin.tracer.trace(self._datadog_name, service=service, | ||||
|                               resource=resource, span_type=SpanTypes.SQL) as s: | ||||
|             s.set_tag(sql.QUERY, resource) | ||||
|             s.set_tags(pin.tags) | ||||
|             s.set_tags(extra_tags) | ||||
| 
 | ||||
|             # set analytics sample rate | ||||
|             s.set_tag( | ||||
|                 ANALYTICS_SAMPLE_RATE_KEY, | ||||
|                 config.aiopg.get_analytics_sample_rate() | ||||
|             ) | ||||
| 
 | ||||
|             try: | ||||
|                 result = yield from method(*args, **kwargs) | ||||
|                 return result | ||||
|             finally: | ||||
|                 s.set_metric('db.rowcount', self.rowcount) | ||||
| 
 | ||||
|     @asyncio.coroutine | ||||
|     def executemany(self, query, *args, **kwargs): | ||||
|         # FIXME[matt] properly handle kwargs here. arg names can be different | ||||
|         # with different libs. | ||||
|         result = yield from self._trace_method( | ||||
|             self.__wrapped__.executemany, query, {'sql.executemany': 'true'}, | ||||
|             query, *args, **kwargs) | ||||
|         return result | ||||
| 
 | ||||
|     @asyncio.coroutine | ||||
|     def execute(self, query, *args, **kwargs): | ||||
|         result = yield from self._trace_method( | ||||
|             self.__wrapped__.execute, query, {}, query, *args, **kwargs) | ||||
|         return result | ||||
| 
 | ||||
|     @asyncio.coroutine | ||||
|     def callproc(self, proc, args): | ||||
|         result = yield from self._trace_method( | ||||
|             self.__wrapped__.callproc, proc, {}, proc, args) | ||||
|         return result | ||||
| 
 | ||||
|     def __aiter__(self): | ||||
|         return self.__wrapped__.__aiter__() | ||||
| 
 | ||||
| 
 | ||||
| class AIOTracedConnection(wrapt.ObjectProxy): | ||||
|     """ TracedConnection wraps a Connection with tracing code. """ | ||||
| 
 | ||||
|     def __init__(self, conn, pin=None, cursor_cls=AIOTracedCursor): | ||||
|         super(AIOTracedConnection, self).__init__(conn) | ||||
|         name = dbapi._get_vendor(conn) | ||||
|         db_pin = pin or Pin(service=name, app=name) | ||||
|         db_pin.onto(self) | ||||
|         # wrapt requires prefix of `_self` for attributes that are only in the | ||||
|         # proxy (since some of our source objects will use `__slots__`) | ||||
|         self._self_cursor_cls = cursor_cls | ||||
| 
 | ||||
|     def cursor(self, *args, **kwargs): | ||||
|         # unfortunately we also need to patch this method as otherwise "self" | ||||
|         # ends up being the aiopg connection object | ||||
|         coro = self._cursor(*args, **kwargs) | ||||
|         return _ContextManager(coro) | ||||
| 
 | ||||
|     @asyncio.coroutine | ||||
|     def _cursor(self, *args, **kwargs): | ||||
|         cursor = yield from self.__wrapped__._cursor(*args, **kwargs) | ||||
|         pin = Pin.get_from(self) | ||||
|         if not pin: | ||||
|             return cursor | ||||
|         return self._self_cursor_cls(cursor, pin) | ||||
|  | @ -0,0 +1,57 @@ | |||
| # 3p | ||||
| import asyncio | ||||
| 
 | ||||
| import aiopg.connection | ||||
| import psycopg2.extensions | ||||
| from ddtrace.vendor import wrapt | ||||
| 
 | ||||
| from .connection import AIOTracedConnection | ||||
| from ..psycopg.patch import _patch_extensions, \ | ||||
|     _unpatch_extensions, patch_conn as psycopg_patch_conn | ||||
| from ...utils.wrappers import unwrap as _u | ||||
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     """ Patch monkey patches psycopg's connection function | ||||
|         so that the connection's functions are traced. | ||||
|     """ | ||||
|     if getattr(aiopg, '_datadog_patch', False): | ||||
|         return | ||||
|     setattr(aiopg, '_datadog_patch', True) | ||||
| 
 | ||||
|     wrapt.wrap_function_wrapper(aiopg.connection, '_connect', patched_connect) | ||||
|     _patch_extensions(_aiopg_extensions)  # do this early just in case | ||||
| 
 | ||||
| 
 | ||||
| def unpatch(): | ||||
|     if getattr(aiopg, '_datadog_patch', False): | ||||
|         setattr(aiopg, '_datadog_patch', False) | ||||
|         _u(aiopg.connection, '_connect') | ||||
|         _unpatch_extensions(_aiopg_extensions) | ||||
| 
 | ||||
| 
 | ||||
| @asyncio.coroutine | ||||
| def patched_connect(connect_func, _, args, kwargs): | ||||
|     conn = yield from connect_func(*args, **kwargs) | ||||
|     return psycopg_patch_conn(conn, traced_conn_cls=AIOTracedConnection) | ||||
| 
 | ||||
| 
 | ||||
| def _extensions_register_type(func, _, args, kwargs): | ||||
|     def _unroll_args(obj, scope=None): | ||||
|         return obj, scope | ||||
|     obj, scope = _unroll_args(*args, **kwargs) | ||||
| 
 | ||||
|     # register_type performs a c-level check of the object | ||||
|     # type so we must be sure to pass in the actual db connection | ||||
|     if scope and isinstance(scope, wrapt.ObjectProxy): | ||||
|         scope = scope.__wrapped__._conn | ||||
| 
 | ||||
|     return func(obj, scope) if scope else func(obj) | ||||
| 
 | ||||
| 
 | ||||
| # extension hooks | ||||
| _aiopg_extensions = [ | ||||
|     (psycopg2.extensions.register_type, | ||||
|      psycopg2.extensions, 'register_type', | ||||
|      _extensions_register_type), | ||||
| ] | ||||
|  | @ -0,0 +1,32 @@ | |||
| """ | ||||
| The Algoliasearch__ integration will add tracing to your Algolia searches. | ||||
| 
 | ||||
| :: | ||||
| 
 | ||||
|     from ddtrace import patch_all | ||||
|     patch_all() | ||||
| 
 | ||||
|     from algoliasearch import algoliasearch | ||||
|     client = algoliasearch.Client(<ID>, <API_KEY>) | ||||
|     index = client.init_index(<INDEX_NAME>) | ||||
|     index.search("your query", args={"attributesToRetrieve": "attribute1,attribute1"}) | ||||
| 
 | ||||
| Configuration | ||||
| ~~~~~~~~~~~~~ | ||||
| 
 | ||||
| .. py:data:: ddtrace.config.algoliasearch['collect_query_text'] | ||||
| 
 | ||||
|    Whether to pass the text of your query onto Datadog. Since this may contain sensitive data, it's off by default. | ||||
| 
 | ||||
|    Default: ``False`` | ||||
| 
 | ||||
| .. __: https://www.algolia.com | ||||
| """ | ||||
| 
 | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| with require_modules(['algoliasearch', 'algoliasearch.version']) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .patch import patch, unpatch | ||||
| 
 | ||||
|         __all__ = ['patch', 'unpatch'] | ||||
|  | @ -0,0 +1,143 @@ | |||
| from ddtrace.pin import Pin | ||||
| from ddtrace.settings import config | ||||
| from ddtrace.utils.wrappers import unwrap as _u | ||||
| from ddtrace.vendor.wrapt import wrap_function_wrapper as _w | ||||
| 
 | ||||
| DD_PATCH_ATTR = '_datadog_patch' | ||||
| 
 | ||||
| SERVICE_NAME = 'algoliasearch' | ||||
| APP_NAME = 'algoliasearch' | ||||
| 
 | ||||
| try: | ||||
|     import algoliasearch | ||||
|     from algoliasearch.version import VERSION | ||||
|     algoliasearch_version = tuple([int(i) for i in VERSION.split('.')]) | ||||
| 
 | ||||
|     # Default configuration | ||||
|     config._add('algoliasearch', dict( | ||||
|         service_name=SERVICE_NAME, | ||||
|         collect_query_text=False | ||||
|     )) | ||||
| except ImportError: | ||||
|     algoliasearch_version = (0, 0) | ||||
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     if algoliasearch_version == (0, 0): | ||||
|         return | ||||
| 
 | ||||
|     if getattr(algoliasearch, DD_PATCH_ATTR, False): | ||||
|         return | ||||
| 
 | ||||
|     setattr(algoliasearch, '_datadog_patch', True) | ||||
| 
 | ||||
|     pin = Pin( | ||||
|         service=config.algoliasearch.service_name, app=APP_NAME | ||||
|     ) | ||||
| 
 | ||||
|     if algoliasearch_version < (2, 0) and algoliasearch_version >= (1, 0): | ||||
|         _w(algoliasearch.index, 'Index.search', _patched_search) | ||||
|         pin.onto(algoliasearch.index.Index) | ||||
|     elif algoliasearch_version >= (2, 0) and algoliasearch_version < (3, 0): | ||||
|         from algoliasearch import search_index | ||||
|         _w(algoliasearch, 'search_index.SearchIndex.search', _patched_search) | ||||
|         pin.onto(search_index.SearchIndex) | ||||
|     else: | ||||
|         return | ||||
| 
 | ||||
| 
 | ||||
| def unpatch(): | ||||
|     if algoliasearch_version == (0, 0): | ||||
|         return | ||||
| 
 | ||||
|     if getattr(algoliasearch, DD_PATCH_ATTR, False): | ||||
|         setattr(algoliasearch, DD_PATCH_ATTR, False) | ||||
| 
 | ||||
|     if algoliasearch_version < (2, 0) and algoliasearch_version >= (1, 0): | ||||
|         _u(algoliasearch.index.Index, 'search') | ||||
|     elif algoliasearch_version >= (2, 0) and algoliasearch_version < (3, 0): | ||||
|         from algoliasearch import search_index | ||||
|         _u(search_index.SearchIndex, 'search') | ||||
|     else: | ||||
|         return | ||||
| 
 | ||||
| 
 | ||||
| # DEV: this map serves the dual purpose of enumerating the algoliasearch.search() query_args that | ||||
| # will be sent along as tags, as well as converting argument names into tag names compliant with | ||||
| # tag naming recommendations set out here: https://docs.datadoghq.com/tagging/ | ||||
| QUERY_ARGS_DD_TAG_MAP = { | ||||
|     'page': 'page', | ||||
|     'hitsPerPage': 'hits_per_page', | ||||
|     'attributesToRetrieve': 'attributes_to_retrieve', | ||||
|     'attributesToHighlight': 'attributes_to_highlight', | ||||
|     'attributesToSnippet': 'attributes_to_snippet', | ||||
|     'minWordSizefor1Typo': 'min_word_size_for_1_typo', | ||||
|     'minWordSizefor2Typos': 'min_word_size_for_2_typos', | ||||
|     'getRankingInfo': 'get_ranking_info', | ||||
|     'aroundLatLng': 'around_lat_lng', | ||||
|     'numericFilters': 'numeric_filters', | ||||
|     'tagFilters': 'tag_filters', | ||||
|     'queryType': 'query_type', | ||||
|     'optionalWords': 'optional_words', | ||||
|     'distinct': 'distinct' | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| def _patched_search(func, instance, wrapt_args, wrapt_kwargs): | ||||
|     """ | ||||
|         wrapt_args is named the way it is to distinguish it from the 'args' | ||||
|         argument to the algoliasearch.index.Index.search() method. | ||||
|     """ | ||||
| 
 | ||||
|     if algoliasearch_version < (2, 0) and algoliasearch_version >= (1, 0): | ||||
|         function_query_arg_name = 'args' | ||||
|     elif algoliasearch_version >= (2, 0) and algoliasearch_version < (3, 0): | ||||
|         function_query_arg_name = 'request_options' | ||||
|     else: | ||||
|         return func(*wrapt_args, **wrapt_kwargs) | ||||
| 
 | ||||
|     pin = Pin.get_from(instance) | ||||
|     if not pin or not pin.enabled(): | ||||
|         return func(*wrapt_args, **wrapt_kwargs) | ||||
| 
 | ||||
|     with pin.tracer.trace('algoliasearch.search', service=pin.service) as span: | ||||
|         if not span.sampled: | ||||
|             return func(*wrapt_args, **wrapt_kwargs) | ||||
| 
 | ||||
|         if config.algoliasearch.collect_query_text: | ||||
|             span.set_tag('query.text', wrapt_kwargs.get('query', wrapt_args[0])) | ||||
| 
 | ||||
|         query_args = wrapt_kwargs.get(function_query_arg_name, wrapt_args[1] if len(wrapt_args) > 1 else None) | ||||
| 
 | ||||
|         if query_args and isinstance(query_args, dict): | ||||
|             for query_arg, tag_name in QUERY_ARGS_DD_TAG_MAP.items(): | ||||
|                 value = query_args.get(query_arg) | ||||
|                 if value is not None: | ||||
|                     span.set_tag('query.args.{}'.format(tag_name), value) | ||||
| 
 | ||||
|         # Result would look like this | ||||
|         # { | ||||
|         #   'hits': [ | ||||
|         #     { | ||||
|         #       .... your search results ... | ||||
|         #     } | ||||
|         #   ], | ||||
|         #   'processingTimeMS': 1, | ||||
|         #   'nbHits': 1, | ||||
|         #   'hitsPerPage': 20, | ||||
|         #   'exhaustiveNbHits': true, | ||||
|         #   'params': 'query=xxx', | ||||
|         #   'nbPages': 1, | ||||
|         #   'query': 'xxx', | ||||
|         #   'page': 0 | ||||
|         # } | ||||
|         result = func(*wrapt_args, **wrapt_kwargs) | ||||
| 
 | ||||
|         if isinstance(result, dict): | ||||
|             if result.get('processingTimeMS', None) is not None: | ||||
|                 span.set_metric('processing_time_ms', int(result['processingTimeMS'])) | ||||
| 
 | ||||
|             if result.get('nbHits', None) is not None: | ||||
|                 span.set_metric('number_of_hits', int(result['nbHits'])) | ||||
| 
 | ||||
|         return result | ||||
|  | @ -0,0 +1,72 @@ | |||
| """ | ||||
| This integration provides the ``AsyncioContextProvider`` that follows the execution | ||||
| flow of a ``Task``, making it possible to trace asynchronous code built on top | ||||
| of ``asyncio``. To trace asynchronous execution, you must:: | ||||
| 
 | ||||
|     import asyncio | ||||
|     from ddtrace import tracer | ||||
|     from ddtrace.contrib.asyncio import context_provider | ||||
| 
 | ||||
|     # enable asyncio support | ||||
|     tracer.configure(context_provider=context_provider) | ||||
| 
 | ||||
|     async def some_work(): | ||||
|         with tracer.trace('asyncio.some_work'): | ||||
|             # do something | ||||
| 
 | ||||
|     # launch your coroutines as usual | ||||
|     loop = asyncio.get_event_loop() | ||||
|     loop.run_until_complete(some_work()) | ||||
|     loop.close() | ||||
| 
 | ||||
| If ``contextvars`` is available, we use the | ||||
| :class:`ddtrace.provider.DefaultContextProvider`, otherwise we use the legacy | ||||
| :class:`ddtrace.contrib.asyncio.provider.AsyncioContextProvider`. | ||||
| 
 | ||||
| In addition, helpers are provided to simplify how the tracing ``Context`` is | ||||
| handled between scheduled coroutines and ``Future`` objects invoked in separate | ||||
| threads: | ||||
| 
 | ||||
|     * ``set_call_context(task, ctx)``: attach the context to the given ``Task`` | ||||
|       so that it will be available from ``tracer.get_call_context()`` | ||||
|     * ``ensure_future(coro_or_future, *, loop=None)``: wrapper for the | ||||
|       ``asyncio.ensure_future`` that attaches the current context to a new | ||||
|       ``Task`` instance | ||||
|     * ``run_in_executor(loop, executor, func, *args)``: wrapper for the | ||||
|       ``loop.run_in_executor`` that attaches the current context to the | ||||
|       new thread so that the trace can be resumed regardless of when | ||||
|       it's executed | ||||
|     * ``create_task(coro)``: creates a new asyncio ``Task`` that inherits | ||||
|       the current active ``Context`` so that generated traces in the new task | ||||
|       are attached to the main trace | ||||
| 
 | ||||
| A ``patch(asyncio=True)`` is available if you want to automatically use the above | ||||
| wrappers without changing your code. In that case, the patch method **must be | ||||
| called before** importing stdlib functions. | ||||
| """ | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| 
 | ||||
| required_modules = ['asyncio'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .provider import AsyncioContextProvider | ||||
|         from ...internal.context_manager import CONTEXTVARS_IS_AVAILABLE | ||||
|         from ...provider import DefaultContextProvider | ||||
| 
 | ||||
|         if CONTEXTVARS_IS_AVAILABLE: | ||||
|             context_provider = DefaultContextProvider() | ||||
|         else: | ||||
|             context_provider = AsyncioContextProvider() | ||||
| 
 | ||||
|         from .helpers import set_call_context, ensure_future, run_in_executor | ||||
|         from .patch import patch | ||||
| 
 | ||||
|         __all__ = [ | ||||
|             'context_provider', | ||||
|             'set_call_context', | ||||
|             'ensure_future', | ||||
|             'run_in_executor', | ||||
|             'patch' | ||||
|         ] | ||||
|  | @ -0,0 +1,9 @@ | |||
| import sys | ||||
| 
 | ||||
| # asyncio.Task.current_task method is deprecated and will be removed in Python | ||||
| # 3.9. Instead use asyncio.current_task | ||||
| if sys.version_info >= (3, 7, 0): | ||||
|     from asyncio import current_task as asyncio_current_task | ||||
| else: | ||||
|     import asyncio | ||||
|     asyncio_current_task = asyncio.Task.current_task | ||||
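| # Illustrative usage (must run inside a task on a running event loop): | ||||
| #   task = asyncio_current_task()  # the running Task, or None outside a task | ||||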
|  | @ -0,0 +1,83 @@ | |||
| """ | ||||
| This module includes a list of convenience methods that | ||||
| can be used to simplify some operations while handling | ||||
| Context and Spans in instrumented ``asyncio`` code. | ||||
| """ | ||||
| import asyncio | ||||
| import ddtrace | ||||
| 
 | ||||
| from .provider import CONTEXT_ATTR | ||||
| from .wrappers import wrapped_create_task | ||||
| from ...context import Context | ||||
| 
 | ||||
| 
 | ||||
| def set_call_context(task, ctx): | ||||
|     """ | ||||
|     Updates the ``Context`` for the given Task. Useful when you need to | ||||
|     pass the context among different tasks. | ||||
| 
 | ||||
|     This method is available for backward-compatibility. Use the | ||||
|     ``AsyncioContextProvider`` API to set the current active ``Context``. | ||||
|     """ | ||||
|     setattr(task, CONTEXT_ATTR, ctx) | ||||
| 
 | ||||
| 
 | ||||
| def ensure_future(coro_or_future, *, loop=None, tracer=None): | ||||
|     """Wrapper that sets a context to the newly created Task. | ||||
| 
 | ||||
|     If the current task already has a Context, it will be attached to the new Task so the trace list will be preserved. | ||||
|     """ | ||||
|     tracer = tracer or ddtrace.tracer | ||||
|     current_ctx = tracer.get_call_context() | ||||
|     task = asyncio.ensure_future(coro_or_future, loop=loop) | ||||
|     set_call_context(task, current_ctx) | ||||
|     return task | ||||
| 
 | ||||
| 
 | ||||
| def run_in_executor(loop, executor, func, *args, tracer=None): | ||||
|     """Wrapper function that sets a context to the newly created Thread. | ||||
| 
 | ||||
|     If the current task has a Context, a new empty Context is attached with the current_span activated, so the | ||||
|     new thread inherits the ``trace_id`` and the ``parent_id``. | ||||
| 
 | ||||
|     Because the Executor can run the Thread immediately or after the | ||||
|     coroutine is executed, we may have two different scenarios: | ||||
|     * the Context is copied in the new Thread and the trace is sent twice | ||||
|     * the coroutine flushes the Context and when the Thread copies the | ||||
|     Context it is already empty (so it will be a root Span) | ||||
| 
 | ||||
|     To support both situations, we create a new Context that knows only what was | ||||
|     the latest active Span when the new thread was created. In this new thread, | ||||
|     we fall back to the thread-local ``Context`` storage. | ||||
| 
 | ||||
|     """ | ||||
|     tracer = tracer or ddtrace.tracer | ||||
|     ctx = Context() | ||||
|     current_ctx = tracer.get_call_context() | ||||
|     ctx._current_span = current_ctx._current_span | ||||
| 
 | ||||
|     # prepare the future using an executor wrapper | ||||
|     future = loop.run_in_executor(executor, _wrap_executor, func, args, tracer, ctx) | ||||
|     return future | ||||
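| # A hypothetical usage sketch: run blocking code without losing the trace | ||||
| # (passing None selects the loop's default executor): | ||||
| #   result = await run_in_executor(loop, None, blocking_io, path) | ||||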
| 
 | ||||
| 
 | ||||
| def _wrap_executor(fn, args, tracer, ctx): | ||||
|     """ | ||||
|     This function is executed in the newly created Thread so the right | ||||
|     ``Context`` can be set in the thread-local storage. This operation | ||||
|     is safe because the ``Context`` class is thread-safe and can be | ||||
|     updated concurrently. | ||||
|     """ | ||||
|     # the AsyncioContextProvider knows that this is a new thread | ||||
|     # so it is legit to pass the Context in the thread-local storage; | ||||
|     # fn() will be executed outside the asyncio loop as synchronous code | ||||
|     tracer.context_provider.activate(ctx) | ||||
|     return fn(*args) | ||||
| 
 | ||||
| 
 | ||||
| def create_task(*args, **kwargs): | ||||
|     """This function spawns a task with a Context that inherits the | ||||
|     `trace_id` and the `parent_id` from the current active one if available. | ||||
|     """ | ||||
|     loop = asyncio.get_event_loop() | ||||
|     return wrapped_create_task(loop.create_task, None, args, kwargs) | ||||
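| # A hypothetical usage sketch: spans created inside the new task join the | ||||
| # caller's active trace: | ||||
| #   with tracer.trace('parent.op'): | ||||
| #       task = create_task(child_coroutine()) | ||||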
|  | @ -0,0 +1,32 @@ | |||
| import asyncio | ||||
| 
 | ||||
| from ddtrace.vendor.wrapt import wrap_function_wrapper as _w | ||||
| 
 | ||||
| from ...internal.context_manager import CONTEXTVARS_IS_AVAILABLE | ||||
| from .wrappers import wrapped_create_task, wrapped_create_task_contextvars | ||||
| from ...utils.wrappers import unwrap as _u | ||||
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     """Patches current loop `create_task()` method to enable spawned tasks to | ||||
|     parent to the base task context. | ||||
|     """ | ||||
|     if getattr(asyncio, '_datadog_patch', False): | ||||
|         return | ||||
|     setattr(asyncio, '_datadog_patch', True) | ||||
| 
 | ||||
|     loop = asyncio.get_event_loop() | ||||
|     if CONTEXTVARS_IS_AVAILABLE: | ||||
|         _w(loop, 'create_task', wrapped_create_task_contextvars) | ||||
|     else: | ||||
|         _w(loop, 'create_task', wrapped_create_task) | ||||
| 
 | ||||
| 
 | ||||
| def unpatch(): | ||||
|     """Remove tracing from patched modules.""" | ||||
| 
 | ||||
|     if getattr(asyncio, '_datadog_patch', False): | ||||
|         setattr(asyncio, '_datadog_patch', False) | ||||
| 
 | ||||
|     loop = asyncio.get_event_loop() | ||||
|     _u(loop, 'create_task') | ||||
|  | @ -0,0 +1,86 @@ | |||
| import asyncio | ||||
| 
 | ||||
| from ...context import Context | ||||
| from ...provider import DefaultContextProvider | ||||
| 
 | ||||
| # Task attribute used to set/get the Context instance | ||||
| CONTEXT_ATTR = '__datadog_context' | ||||
| 
 | ||||
| 
 | ||||
| class AsyncioContextProvider(DefaultContextProvider): | ||||
|     """ | ||||
|     Context provider that retrieves all contexts for the current asyncio | ||||
|     execution. It must be used in asynchronous programming that relies | ||||
|     on the built-in ``asyncio`` library. Framework instrumentation that | ||||
|     is built on top of the ``asyncio`` library can use this provider. | ||||
| 
 | ||||
|     This Context Provider inherits from ``DefaultContextProvider`` because | ||||
|     it uses thread-local storage when the ``Context`` is propagated to | ||||
|     a different thread than the one that is running the async loop. | ||||
|     """ | ||||
|     def activate(self, context, loop=None): | ||||
|         """Sets the scoped ``Context`` for the current running ``Task``. | ||||
|         """ | ||||
|         loop = self._get_loop(loop) | ||||
|         if not loop: | ||||
|             self._local.set(context) | ||||
|             return context | ||||
| 
 | ||||
|         # the current unit of work (if tasks are used) | ||||
|         task = asyncio.Task.current_task(loop=loop) | ||||
|         setattr(task, CONTEXT_ATTR, context) | ||||
|         return context | ||||
| 
 | ||||
|     def _get_loop(self, loop=None): | ||||
|         """Helper to try and resolve the current loop""" | ||||
|         try: | ||||
|             return loop or asyncio.get_event_loop() | ||||
|         except RuntimeError: | ||||
|             # Detects if a loop is available in the current thread; | ||||
|             # DEV: This happens when a new thread is created from the one that is running the async loop | ||||
|             # DEV: It's possible that a different Executor is handling a different Thread that | ||||
|             #      works with blocking code. In that case, we fall back to a thread-local Context. | ||||
|             pass | ||||
|         return None | ||||
| 
 | ||||
|     def _has_active_context(self, loop=None): | ||||
|         """Helper to determine if we have a currently active context""" | ||||
|         loop = self._get_loop(loop=loop) | ||||
|         if loop is None: | ||||
|             return self._local._has_active_context() | ||||
| 
 | ||||
|         # the current unit of work (if tasks are used) | ||||
|         task = asyncio.Task.current_task(loop=loop) | ||||
|         if task is None: | ||||
|             return False | ||||
| 
 | ||||
|         ctx = getattr(task, CONTEXT_ATTR, None) | ||||
|         return ctx is not None | ||||
| 
 | ||||
|     def active(self, loop=None): | ||||
|         """ | ||||
|         Returns the scoped Context for this execution flow. The ``Context`` uses | ||||
|         the current task as a carrier so if a single task is used for the entire application, | ||||
|         the context must be handled separately. | ||||
|         """ | ||||
|         loop = self._get_loop(loop=loop) | ||||
|         if not loop: | ||||
|             return self._local.get() | ||||
| 
 | ||||
|         # the current unit of work (if tasks are used) | ||||
|         task = asyncio.Task.current_task(loop=loop) | ||||
|         if task is None: | ||||
|             # providing a Context detached from the current Task may lead to | ||||
|             # wrong traces. This defensive behavior ensures that a trace can | ||||
|             # still be built without raising exceptions | ||||
|             return Context() | ||||
| 
 | ||||
|         ctx = getattr(task, CONTEXT_ATTR, None) | ||||
|         if ctx is not None: | ||||
|             # return the active Context for this task (if any) | ||||
|             return ctx | ||||
| 
 | ||||
|         # create a new Context using the Task as a Context carrier | ||||
|         ctx = Context() | ||||
|         setattr(task, CONTEXT_ATTR, ctx) | ||||
|         return ctx | ||||
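| 
 | ||||
| # Hedged usage sketch: a provider like this is normally installed on the | ||||
| # global tracer so that spans attach to the currently running Task: | ||||
| # | ||||
| #     from ddtrace import tracer | ||||
| #     tracer.configure(context_provider=AsyncioContextProvider()) | ||||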
|  | @ -0,0 +1,58 @@ | |||
| import ddtrace | ||||
| 
 | ||||
| from .compat import asyncio_current_task | ||||
| from .provider import CONTEXT_ATTR | ||||
| from ...context import Context | ||||
| 
 | ||||
| 
 | ||||
| def wrapped_create_task(wrapped, instance, args, kwargs): | ||||
|     """Wrapper for ``create_task(coro)`` that propagates the current active | ||||
|     ``Context`` to the new ``Task``. This function is useful to connect traces | ||||
|     of detached executions. | ||||
| 
 | ||||
|     Note: we can't just link the task contexts due to the following scenario: | ||||
|         * begin task A | ||||
|         * task A starts task B1..B10 | ||||
|         * finish task B1-B9 (B10 still on trace stack) | ||||
|         * task A starts task C | ||||
|         * now task C gets parented to task B10 since it's still on the stack, | ||||
|           even though it was not actually triggered by B10 | ||||
|     """ | ||||
|     new_task = wrapped(*args, **kwargs) | ||||
|     current_task = asyncio_current_task() | ||||
| 
 | ||||
|     ctx = getattr(current_task, CONTEXT_ATTR, None) | ||||
|     if ctx: | ||||
|         # current task has a context, so parent a new context to the base context | ||||
|         new_ctx = Context( | ||||
|             trace_id=ctx.trace_id, | ||||
|             span_id=ctx.span_id, | ||||
|             sampling_priority=ctx.sampling_priority, | ||||
|         ) | ||||
|         setattr(new_task, CONTEXT_ATTR, new_ctx) | ||||
| 
 | ||||
|     return new_task | ||||
| 
 | ||||
| 
 | ||||
| def wrapped_create_task_contextvars(wrapped, instance, args, kwargs): | ||||
|     """Wrapper for ``create_task(coro)`` that propagates the current active | ||||
|     ``Context`` to the new ``Task``. This function is useful to connect traces | ||||
|     of detached executions. Uses contextvars for task-local storage. | ||||
|     """ | ||||
|     current_task_ctx = ddtrace.tracer.get_call_context() | ||||
| 
 | ||||
|     if not current_task_ctx: | ||||
|         # no current context exists, so there is nothing special to do when | ||||
|         # handling the context for the new task | ||||
|         return wrapped(*args, **kwargs) | ||||
| 
 | ||||
|     # clone and activate current task's context for new task to support | ||||
|     # detached executions | ||||
|     new_task_ctx = current_task_ctx.clone() | ||||
|     ddtrace.tracer.context_provider.activate(new_task_ctx) | ||||
|     try: | ||||
|         # activated context will now be copied to new task | ||||
|         return wrapped(*args, **kwargs) | ||||
|     finally: | ||||
|         # reactivate current task context | ||||
|         ddtrace.tracer.context_provider.activate(current_task_ctx) | ||||
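| 
 | ||||
| # Rationale for the clone/activate dance above: ``loop.create_task`` copies | ||||
| # the current contextvars context into the new Task at creation time, so | ||||
| # activating the cloned trace context just before the call is enough for the | ||||
| # child task to inherit it; re-activating the parent's context afterwards | ||||
| # leaves the calling task unaffected. | ||||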
|  | @ -0,0 +1,24 @@ | |||
| """ | ||||
| Boto integration will trace all AWS calls made via boto2. | ||||
| This integration is automatically patched when using ``patch_all()``:: | ||||
| 
 | ||||
|     import boto.ec2 | ||||
|     from ddtrace import patch | ||||
| 
 | ||||
|     # If not patched yet, you can patch boto specifically | ||||
|     patch(boto=True) | ||||
| 
 | ||||
|     # This will report spans with the default instrumentation | ||||
|     ec2 = boto.ec2.connect_to_region("us-west-2") | ||||
|     # Example of instrumented query | ||||
|     ec2.get_all_instances() | ||||
| """ | ||||
| 
 | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| required_modules = ['boto.connection'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .patch import patch | ||||
|         __all__ = ['patch'] | ||||
|  | @ -0,0 +1,183 @@ | |||
| import boto.connection | ||||
| from ddtrace.vendor import wrapt | ||||
| import inspect | ||||
| 
 | ||||
| from ddtrace import config | ||||
| from ...constants import ANALYTICS_SAMPLE_RATE_KEY | ||||
| from ...pin import Pin | ||||
| from ...ext import SpanTypes, http, aws | ||||
| from ...utils.wrappers import unwrap | ||||
| 
 | ||||
| 
 | ||||
| # Original boto client class | ||||
| _Boto_client = boto.connection.AWSQueryConnection | ||||
| 
 | ||||
| AWS_QUERY_ARGS_NAME = ('operation_name', 'params', 'path', 'verb') | ||||
| AWS_AUTH_ARGS_NAME = ( | ||||
|     'method', | ||||
|     'path', | ||||
|     'headers', | ||||
|     'data', | ||||
|     'host', | ||||
|     'auth_path', | ||||
|     'sender', | ||||
| ) | ||||
| AWS_QUERY_TRACED_ARGS = ['operation_name', 'params', 'path'] | ||||
| AWS_AUTH_TRACED_ARGS = ['path', 'data', 'host'] | ||||
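| 
 | ||||
| # The *_ARGS_NAME tuples list the positional-argument names of the wrapped | ||||
| # make_request() signatures, while the *_TRACED_ARGS lists select which of | ||||
| # those values are safe to record as span tags (paired up by position in | ||||
| # aws.add_span_arg_tags below). | ||||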
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     if getattr(boto.connection, '_datadog_patch', False): | ||||
|         return | ||||
|     setattr(boto.connection, '_datadog_patch', True) | ||||
| 
 | ||||
|     # AWSQueryConnection and AWSAuthConnection are two different classes called by | ||||
|     # different services for connection. | ||||
|     # For example, EC2 uses AWSQueryConnection and S3 uses AWSAuthConnection | ||||
|     wrapt.wrap_function_wrapper( | ||||
|         'boto.connection', 'AWSQueryConnection.make_request', patched_query_request | ||||
|     ) | ||||
|     wrapt.wrap_function_wrapper( | ||||
|         'boto.connection', 'AWSAuthConnection.make_request', patched_auth_request | ||||
|     ) | ||||
|     Pin(service='aws', app='aws').onto( | ||||
|         boto.connection.AWSQueryConnection | ||||
|     ) | ||||
|     Pin(service='aws', app='aws').onto( | ||||
|         boto.connection.AWSAuthConnection | ||||
|     ) | ||||
| 
 | ||||
| 
 | ||||
| def unpatch(): | ||||
|     if getattr(boto.connection, '_datadog_patch', False): | ||||
|         setattr(boto.connection, '_datadog_patch', False) | ||||
|         unwrap(boto.connection.AWSQueryConnection, 'make_request') | ||||
|         unwrap(boto.connection.AWSAuthConnection, 'make_request') | ||||
| 
 | ||||
| 
 | ||||
| # ec2, sqs, kinesis | ||||
| def patched_query_request(original_func, instance, args, kwargs): | ||||
| 
 | ||||
|     pin = Pin.get_from(instance) | ||||
|     if not pin or not pin.enabled(): | ||||
|         return original_func(*args, **kwargs) | ||||
| 
 | ||||
|     endpoint_name = getattr(instance, 'host').split('.')[0] | ||||
| 
 | ||||
|     with pin.tracer.trace( | ||||
|         '{}.command'.format(endpoint_name), | ||||
|         service='{}.{}'.format(pin.service, endpoint_name), | ||||
|         span_type=SpanTypes.HTTP, | ||||
|     ) as span: | ||||
| 
 | ||||
|         operation_name = None | ||||
|         if args: | ||||
|             operation_name = args[0] | ||||
|             span.resource = '%s.%s' % (endpoint_name, operation_name.lower()) | ||||
|         else: | ||||
|             span.resource = endpoint_name | ||||
| 
 | ||||
|         aws.add_span_arg_tags(span, endpoint_name, args, AWS_QUERY_ARGS_NAME, AWS_QUERY_TRACED_ARGS) | ||||
| 
 | ||||
|         # Obtaining region name | ||||
|         region_name = _get_instance_region_name(instance) | ||||
| 
 | ||||
|         meta = { | ||||
|             aws.AGENT: 'boto', | ||||
|             aws.OPERATION: operation_name, | ||||
|         } | ||||
|         if region_name: | ||||
|             meta[aws.REGION] = region_name | ||||
| 
 | ||||
|         span.set_tags(meta) | ||||
| 
 | ||||
|         # Original func returns a boto.connection.HTTPResponse object | ||||
|         result = original_func(*args, **kwargs) | ||||
|         span.set_tag(http.STATUS_CODE, getattr(result, 'status')) | ||||
|         span.set_tag(http.METHOD, getattr(result, '_method')) | ||||
| 
 | ||||
|         # set analytics sample rate | ||||
|         span.set_tag( | ||||
|             ANALYTICS_SAMPLE_RATE_KEY, | ||||
|             config.boto.get_analytics_sample_rate() | ||||
|         ) | ||||
| 
 | ||||
|         return result | ||||
| 
 | ||||
| 
 | ||||
| # s3, lambda | ||||
| def patched_auth_request(original_func, instance, args, kwargs): | ||||
| 
 | ||||
|     # Capture the name of the operation that called make_request() | ||||
|     operation_name = None | ||||
| 
 | ||||
|     # Go up the stack until we get the first non-ddtrace module | ||||
|     # DEV: For `lambda.list_functions()` this should be: | ||||
|     #        - ddtrace.contrib.boto.patch | ||||
|     #        - ddtrace.vendor.wrapt.wrappers | ||||
|     #        - boto.awslambda.layer1 (make_request) | ||||
|     #        - boto.awslambda.layer1 (list_functions) | ||||
|     # But this can vary depending on the Python version; that's why we use a heuristic | ||||
|     frame = inspect.currentframe().f_back | ||||
|     while frame: | ||||
|         if frame.f_code.co_name == 'make_request': | ||||
|             operation_name = frame.f_back.f_code.co_name | ||||
|             break | ||||
|         frame = frame.f_back | ||||
| 
 | ||||
|     pin = Pin.get_from(instance) | ||||
|     if not pin or not pin.enabled(): | ||||
|         return original_func(*args, **kwargs) | ||||
| 
 | ||||
|     endpoint_name = getattr(instance, 'host').split('.')[0] | ||||
| 
 | ||||
|     with pin.tracer.trace( | ||||
|         '{}.command'.format(endpoint_name), | ||||
|         service='{}.{}'.format(pin.service, endpoint_name), | ||||
|         span_type=SpanTypes.HTTP, | ||||
|     ) as span: | ||||
| 
 | ||||
|         if args: | ||||
|             http_method = args[0] | ||||
|             span.resource = '%s.%s' % (endpoint_name, http_method.lower()) | ||||
|         else: | ||||
|             span.resource = endpoint_name | ||||
| 
 | ||||
|         aws.add_span_arg_tags(span, endpoint_name, args, AWS_AUTH_ARGS_NAME, AWS_AUTH_TRACED_ARGS) | ||||
| 
 | ||||
|         # Obtaining region name | ||||
|         region_name = _get_instance_region_name(instance) | ||||
| 
 | ||||
|         meta = { | ||||
|             aws.AGENT: 'boto', | ||||
|             aws.OPERATION: operation_name, | ||||
|         } | ||||
|         if region_name: | ||||
|             meta[aws.REGION] = region_name | ||||
| 
 | ||||
|         span.set_tags(meta) | ||||
| 
 | ||||
|         # Original func returns a boto.connection.HTTPResponse object | ||||
|         result = original_func(*args, **kwargs) | ||||
|         span.set_tag(http.STATUS_CODE, getattr(result, 'status')) | ||||
|         span.set_tag(http.METHOD, getattr(result, '_method')) | ||||
| 
 | ||||
|         # set analytics sample rate | ||||
|         span.set_tag( | ||||
|             ANALYTICS_SAMPLE_RATE_KEY, | ||||
|             config.boto.get_analytics_sample_rate() | ||||
|         ) | ||||
| 
 | ||||
|         return result | ||||
| 
 | ||||
| 
 | ||||
| def _get_instance_region_name(instance): | ||||
|     region = getattr(instance, 'region', None) | ||||
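|     # `region` may be a RegionInfo object exposing a `name` attribute or, | ||||
|     # for some connections, a string that appears to be of the form | ||||
|     # 'RegionInfo:us-west-2'; the branches below assume those two shapes. | ||||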
| 
 | ||||
|     if not region: | ||||
|         return None | ||||
|     if isinstance(region, str): | ||||
|         return region.split(':')[1] | ||||
|     else: | ||||
|         return region.name | ||||
|  | @ -0,0 +1,28 @@ | |||
| """ | ||||
| The Botocore integration will trace all AWS calls made with the botocore | ||||
| library. Libraries like Boto3 that use Botocore will also be patched. | ||||
| 
 | ||||
| This integration is automatically patched when using ``patch_all()``:: | ||||
| 
 | ||||
|     import botocore.session | ||||
|     from ddtrace import patch | ||||
| 
 | ||||
|     # If not patched yet, you can patch botocore specifically | ||||
|     patch(botocore=True) | ||||
| 
 | ||||
|     # This will report spans with the default instrumentation | ||||
|     session = botocore.session.get_session() | ||||
|     lambda_client = session.create_client('lambda', region_name='us-east-1') | ||||
|     # Example of instrumented query | ||||
|     lambda_client.list_functions() | ||||
| """ | ||||
| 
 | ||||
| 
 | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| required_modules = ['botocore.client'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .patch import patch | ||||
|         __all__ = ['patch'] | ||||
|  | @ -0,0 +1,81 @@ | |||
| """ | ||||
| Trace queries to the AWS API made via the botocore client | ||||
| """ | ||||
| # 3p | ||||
| from ddtrace.vendor import wrapt | ||||
| from ddtrace import config | ||||
| import botocore.client | ||||
| 
 | ||||
| # project | ||||
| from ...constants import ANALYTICS_SAMPLE_RATE_KEY | ||||
| from ...pin import Pin | ||||
| from ...ext import SpanTypes, http, aws | ||||
| from ...utils.formats import deep_getattr | ||||
| from ...utils.wrappers import unwrap | ||||
| 
 | ||||
| 
 | ||||
| # Original botocore client class | ||||
| _Botocore_client = botocore.client.BaseClient | ||||
| 
 | ||||
| ARGS_NAME = ('action', 'params', 'path', 'verb') | ||||
| TRACED_ARGS = ['params', 'path', 'verb'] | ||||
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     if getattr(botocore.client, '_datadog_patch', False): | ||||
|         return | ||||
|     setattr(botocore.client, '_datadog_patch', True) | ||||
| 
 | ||||
|     wrapt.wrap_function_wrapper('botocore.client', 'BaseClient._make_api_call', patched_api_call) | ||||
|     Pin(service='aws', app='aws').onto(botocore.client.BaseClient) | ||||
| 
 | ||||
| 
 | ||||
| def unpatch(): | ||||
|     if getattr(botocore.client, '_datadog_patch', False): | ||||
|         setattr(botocore.client, '_datadog_patch', False) | ||||
|         unwrap(botocore.client.BaseClient, '_make_api_call') | ||||
| 
 | ||||
| 
 | ||||
| def patched_api_call(original_func, instance, args, kwargs): | ||||
| 
 | ||||
|     pin = Pin.get_from(instance) | ||||
|     if not pin or not pin.enabled(): | ||||
|         return original_func(*args, **kwargs) | ||||
| 
 | ||||
|     endpoint_name = deep_getattr(instance, '_endpoint._endpoint_prefix') | ||||
| 
 | ||||
|     with pin.tracer.trace('{}.command'.format(endpoint_name), | ||||
|                           service='{}.{}'.format(pin.service, endpoint_name), | ||||
|                           span_type=SpanTypes.HTTP) as span: | ||||
| 
 | ||||
|         operation = None | ||||
|         if args: | ||||
|             operation = args[0] | ||||
|             span.resource = '%s.%s' % (endpoint_name, operation.lower()) | ||||
| 
 | ||||
|         else: | ||||
|             span.resource = endpoint_name | ||||
| 
 | ||||
|         aws.add_span_arg_tags(span, endpoint_name, args, ARGS_NAME, TRACED_ARGS) | ||||
| 
 | ||||
|         region_name = deep_getattr(instance, 'meta.region_name') | ||||
| 
 | ||||
|         meta = { | ||||
|             'aws.agent': 'botocore', | ||||
|             'aws.operation': operation, | ||||
|             'aws.region': region_name, | ||||
|         } | ||||
|         span.set_tags(meta) | ||||
| 
 | ||||
|         result = original_func(*args, **kwargs) | ||||
| 
 | ||||
|         span.set_tag(http.STATUS_CODE, result['ResponseMetadata']['HTTPStatusCode']) | ||||
|         span.set_tag('retry_attempts', result['ResponseMetadata']['RetryAttempts']) | ||||
| 
 | ||||
|         # set analytics sample rate | ||||
|         span.set_tag( | ||||
|             ANALYTICS_SAMPLE_RATE_KEY, | ||||
|             config.botocore.get_analytics_sample_rate() | ||||
|         ) | ||||
| 
 | ||||
|         return result | ||||
|  | @ -0,0 +1,23 @@ | |||
| """ | ||||
| The bottle integration traces the Bottle web framework. Add the following | ||||
| plugin to your app:: | ||||
| 
 | ||||
|     import bottle | ||||
|     from ddtrace import tracer | ||||
|     from ddtrace.contrib.bottle import TracePlugin | ||||
| 
 | ||||
|     app = bottle.Bottle() | ||||
|     plugin = TracePlugin(service="my-web-app") | ||||
|     app.install(plugin) | ||||
| """ | ||||
| 
 | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| required_modules = ['bottle'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .trace import TracePlugin | ||||
|         from .patch import patch | ||||
| 
 | ||||
|         __all__ = ['TracePlugin', 'patch'] | ||||
|  | @ -0,0 +1,26 @@ | |||
| import os | ||||
| 
 | ||||
| from .trace import TracePlugin | ||||
| 
 | ||||
| import bottle | ||||
| 
 | ||||
| from ddtrace.vendor import wrapt | ||||
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     """Patch the bottle.Bottle class | ||||
|     """ | ||||
|     if getattr(bottle, '_datadog_patch', False): | ||||
|         return | ||||
| 
 | ||||
|     setattr(bottle, '_datadog_patch', True) | ||||
|     wrapt.wrap_function_wrapper('bottle', 'Bottle.__init__', traced_init) | ||||
| 
 | ||||
| 
 | ||||
| def traced_init(wrapped, instance, args, kwargs): | ||||
|     wrapped(*args, **kwargs) | ||||
| 
 | ||||
|     service = os.environ.get('DATADOG_SERVICE_NAME') or 'bottle' | ||||
| 
 | ||||
|     plugin = TracePlugin(service=service) | ||||
|     instance.install(plugin) | ||||
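| 
 | ||||
| # Hedged usage sketch: once the patch is applied (e.g. via ddtrace-run or | ||||
| # ddtrace.patch(bottle=True)), plain Bottle apps are traced with no code | ||||
| # changes, and the service name can be set from the environment: | ||||
| # | ||||
| #     DATADOG_SERVICE_NAME=my-web-app ddtrace-run python app.py | ||||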
|  | @ -0,0 +1,83 @@ | |||
| # 3p | ||||
| from bottle import response, request, HTTPError, HTTPResponse | ||||
| 
 | ||||
| # stdlib | ||||
| import ddtrace | ||||
| 
 | ||||
| # project | ||||
| from ...constants import ANALYTICS_SAMPLE_RATE_KEY | ||||
| from ...ext import SpanTypes, http | ||||
| from ...propagation.http import HTTPPropagator | ||||
| from ...settings import config | ||||
| 
 | ||||
| 
 | ||||
| class TracePlugin(object): | ||||
|     name = 'trace' | ||||
|     api = 2 | ||||
| 
 | ||||
|     def __init__(self, service='bottle', tracer=None, distributed_tracing=True): | ||||
|         self.service = service | ||||
|         self.tracer = tracer or ddtrace.tracer | ||||
|         self.distributed_tracing = distributed_tracing | ||||
| 
 | ||||
|     def apply(self, callback, route): | ||||
| 
 | ||||
|         def wrapped(*args, **kwargs): | ||||
|             if not self.tracer or not self.tracer.enabled: | ||||
|                 return callback(*args, **kwargs) | ||||
| 
 | ||||
|             resource = '{} {}'.format(request.method, route.rule) | ||||
| 
 | ||||
|             # Propagate headers such as x-datadog-trace-id. | ||||
|             if self.distributed_tracing: | ||||
|                 propagator = HTTPPropagator() | ||||
|                 context = propagator.extract(request.headers) | ||||
|                 if context.trace_id: | ||||
|                     self.tracer.context_provider.activate(context) | ||||
| 
 | ||||
|             with self.tracer.trace( | ||||
|                 'bottle.request', service=self.service, resource=resource, span_type=SpanTypes.WEB | ||||
|             ) as s: | ||||
|                 # set analytics sample rate with global config enabled | ||||
|                 s.set_tag( | ||||
|                     ANALYTICS_SAMPLE_RATE_KEY, | ||||
|                     config.bottle.get_analytics_sample_rate(use_global_config=True) | ||||
|                 ) | ||||
| 
 | ||||
|                 code = None | ||||
|                 result = None | ||||
|                 try: | ||||
|                     result = callback(*args, **kwargs) | ||||
|                     return result | ||||
|                 except (HTTPError, HTTPResponse) as e: | ||||
|                     # you can interrupt flows using abort(status_code, 'message')... | ||||
|                     # we need to respect the defined status_code. | ||||
|                     # we also need to handle the case where a response is raised, | ||||
|                     # as happens with a 4xx status | ||||
|                     code = e.status_code | ||||
|                     raise | ||||
|                 except Exception: | ||||
|                     # bottle doesn't always translate unhandled exceptions, so | ||||
|                     # we mark it here. | ||||
|                     code = 500 | ||||
|                     raise | ||||
|                 finally: | ||||
|                     if isinstance(result, HTTPResponse): | ||||
|                         response_code = result.status_code | ||||
|                     elif code: | ||||
|                         response_code = code | ||||
|                     else: | ||||
|                         # bottle's local response has not yet been updated, so this | ||||
|                         # will be the default | ||||
|                         response_code = response.status_code | ||||
| 
 | ||||
|                     if 500 <= response_code < 600: | ||||
|                         s.error = 1 | ||||
| 
 | ||||
|                     s.set_tag(http.STATUS_CODE, response_code) | ||||
|                     s.set_tag(http.URL, request.urlparts._replace(query='').geturl()) | ||||
|                     s.set_tag(http.METHOD, request.method) | ||||
|                     if config.bottle.trace_query_string: | ||||
|                         s.set_tag(http.QUERY_STRING, request.query_string) | ||||
| 
 | ||||
|         return wrapped | ||||
|  | @ -0,0 +1,35 @@ | |||
| """Instrument Cassandra to report Cassandra queries. | ||||
| 
 | ||||
| ``patch_all`` will automatically patch your Cluster instance to make it work. | ||||
| :: | ||||
| 
 | ||||
|     from ddtrace import Pin, patch | ||||
|     from cassandra.cluster import Cluster | ||||
| 
 | ||||
|     # If not patched yet, you can patch cassandra specifically | ||||
|     patch(cassandra=True) | ||||
| 
 | ||||
|     # This will report spans with the default instrumentation | ||||
|     cluster = Cluster(contact_points=["127.0.0.1"], port=9042) | ||||
|     session = cluster.connect("my_keyspace") | ||||
|     # Example of instrumented query | ||||
|     session.execute("select id from my_table limit 10;") | ||||
| 
 | ||||
|     # Use a pin to specify metadata related to this cluster | ||||
|     cluster = Cluster(contact_points=['10.1.1.3', '10.1.1.4', '10.1.1.5'], port=9042) | ||||
|     Pin.override(cluster, service='cassandra-backend') | ||||
|     session = cluster.connect("my_keyspace") | ||||
|     session.execute("select id from my_table limit 10;") | ||||
| """ | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| 
 | ||||
| required_modules = ['cassandra.cluster'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .session import get_traced_cassandra, patch | ||||
|         __all__ = [ | ||||
|             'get_traced_cassandra', | ||||
|             'patch', | ||||
|         ] | ||||
|  | @ -0,0 +1,3 @@ | |||
| from .session import patch, unpatch | ||||
| 
 | ||||
| __all__ = ['patch', 'unpatch'] | ||||
|  | @ -0,0 +1,297 @@ | |||
| """ | ||||
| Trace queries along a session to a Cassandra cluster | ||||
| """ | ||||
| import sys | ||||
| 
 | ||||
| # 3p | ||||
| import cassandra.cluster | ||||
| 
 | ||||
| # project | ||||
| from ...compat import stringify | ||||
| from ...constants import ANALYTICS_SAMPLE_RATE_KEY | ||||
| from ...ext import SpanTypes, net, cassandra as cassx, errors | ||||
| from ...internal.logger import get_logger | ||||
| from ...pin import Pin | ||||
| from ...settings import config | ||||
| from ...utils.deprecation import deprecated | ||||
| from ...utils.formats import deep_getattr | ||||
| from ...vendor import wrapt | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| RESOURCE_MAX_LENGTH = 5000 | ||||
| SERVICE = 'cassandra' | ||||
| CURRENT_SPAN = '_ddtrace_current_span' | ||||
| PAGE_NUMBER = '_ddtrace_page_number' | ||||
| 
 | ||||
| # Original Cluster.connect function | ||||
| _connect = cassandra.cluster.Cluster.connect | ||||
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     """ patch will add tracing to the cassandra library. """ | ||||
|     setattr(cassandra.cluster.Cluster, 'connect', | ||||
|             wrapt.FunctionWrapper(_connect, traced_connect)) | ||||
|     Pin(service=SERVICE, app=SERVICE).onto(cassandra.cluster.Cluster) | ||||
| 
 | ||||
| 
 | ||||
| def unpatch(): | ||||
|     cassandra.cluster.Cluster.connect = _connect | ||||
| 
 | ||||
| 
 | ||||
| def traced_connect(func, instance, args, kwargs): | ||||
|     session = func(*args, **kwargs) | ||||
|     if not isinstance(session.execute, wrapt.FunctionWrapper): | ||||
|         # FIXME[matt] this should probably be private. | ||||
|         setattr(session, 'execute_async', wrapt.FunctionWrapper(session.execute_async, traced_execute_async)) | ||||
|     return session | ||||
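| 
 | ||||
| # Note: the driver's synchronous Session.execute() is implemented on top of | ||||
| # execute_async() (it blocks on the returned ResponseFuture), so wrapping | ||||
| # only execute_async above should be enough to trace both code paths. | ||||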
| 
 | ||||
| 
 | ||||
| def _close_span_on_success(result, future): | ||||
|     span = getattr(future, CURRENT_SPAN, None) | ||||
|     if not span: | ||||
|         log.debug('traced_set_final_result was not able to get the current span from the ResponseFuture') | ||||
|         return | ||||
|     try: | ||||
|         span.set_tags(_extract_result_metas(cassandra.cluster.ResultSet(future, result))) | ||||
|     except Exception: | ||||
|         log.debug('an exception occurred while setting tags', exc_info=True) | ||||
|     finally: | ||||
|         span.finish() | ||||
|         delattr(future, CURRENT_SPAN) | ||||
| 
 | ||||
| 
 | ||||
| def traced_set_final_result(func, instance, args, kwargs): | ||||
|     result = args[0] | ||||
|     _close_span_on_success(result, instance) | ||||
|     return func(*args, **kwargs) | ||||
| 
 | ||||
| 
 | ||||
| def _close_span_on_error(exc, future): | ||||
|     span = getattr(future, CURRENT_SPAN, None) | ||||
|     if not span: | ||||
|         log.debug('traced_set_final_exception was not able to get the current span from the ResponseFuture') | ||||
|         return | ||||
|     try: | ||||
|         # handling the exception manually because we | ||||
|         # don't have an ongoing exception here | ||||
|         span.error = 1 | ||||
|         span.set_tag(errors.ERROR_MSG, exc.args[0]) | ||||
|         span.set_tag(errors.ERROR_TYPE, exc.__class__.__name__) | ||||
|     except Exception: | ||||
|         log.debug('traced_set_final_exception was not able to set the error, failed with error', exc_info=True) | ||||
|     finally: | ||||
|         span.finish() | ||||
|         delattr(future, CURRENT_SPAN) | ||||
| 
 | ||||
| 
 | ||||
| def traced_set_final_exception(func, instance, args, kwargs): | ||||
|     exc = args[0] | ||||
|     _close_span_on_error(exc, instance) | ||||
|     return func(*args, **kwargs) | ||||
| 
 | ||||
| 
 | ||||
| def traced_start_fetching_next_page(func, instance, args, kwargs): | ||||
|     has_more_pages = getattr(instance, 'has_more_pages', True) | ||||
|     if not has_more_pages: | ||||
|         return func(*args, **kwargs) | ||||
|     session = getattr(instance, 'session', None) | ||||
|     cluster = getattr(session, 'cluster', None) | ||||
|     pin = Pin.get_from(cluster) | ||||
|     if not pin or not pin.enabled(): | ||||
|         return func(*args, **kwargs) | ||||
| 
 | ||||
|     # In case the current span is not finished we make sure to finish it | ||||
|     old_span = getattr(instance, CURRENT_SPAN, None) | ||||
|     if old_span: | ||||
|         log.debug('previous span was not finished before fetching next page') | ||||
|         old_span.finish() | ||||
| 
 | ||||
|     query = getattr(instance, 'query', None) | ||||
| 
 | ||||
|     span = _start_span_and_set_tags(pin, query, session, cluster) | ||||
| 
 | ||||
|     page_number = getattr(instance, PAGE_NUMBER, 1) + 1 | ||||
|     setattr(instance, PAGE_NUMBER, page_number) | ||||
|     setattr(instance, CURRENT_SPAN, span) | ||||
|     try: | ||||
|         return func(*args, **kwargs) | ||||
|     except Exception: | ||||
|         with span: | ||||
|             span.set_exc_info(*sys.exc_info()) | ||||
|         raise | ||||
| 
 | ||||
| 
 | ||||
| def traced_execute_async(func, instance, args, kwargs): | ||||
|     cluster = getattr(instance, 'cluster', None) | ||||
|     pin = Pin.get_from(cluster) | ||||
|     if not pin or not pin.enabled(): | ||||
|         return func(*args, **kwargs) | ||||
| 
 | ||||
|     query = kwargs.get('query') or args[0] | ||||
| 
 | ||||
|     span = _start_span_and_set_tags(pin, query, instance, cluster) | ||||
| 
 | ||||
|     try: | ||||
|         result = func(*args, **kwargs) | ||||
|         setattr(result, CURRENT_SPAN, span) | ||||
|         setattr(result, PAGE_NUMBER, 1) | ||||
|         setattr( | ||||
|             result, | ||||
|             '_set_final_result', | ||||
|             wrapt.FunctionWrapper( | ||||
|                 result._set_final_result, | ||||
|                 traced_set_final_result | ||||
|             ) | ||||
|         ) | ||||
|         setattr( | ||||
|             result, | ||||
|             '_set_final_exception', | ||||
|             wrapt.FunctionWrapper( | ||||
|                 result._set_final_exception, | ||||
|                 traced_set_final_exception | ||||
|             ) | ||||
|         ) | ||||
|         setattr( | ||||
|             result, | ||||
|             'start_fetching_next_page', | ||||
|             wrapt.FunctionWrapper( | ||||
|                 result.start_fetching_next_page, | ||||
|                 traced_start_fetching_next_page | ||||
|             ) | ||||
|         ) | ||||
|         # Since we cannot be sure that the previous methods were overwritten | ||||
|         # before the call ended, we add callbacks that will be run | ||||
|         # synchronously if the call already returned and we remove them right | ||||
|         # after. | ||||
|         result.add_callbacks( | ||||
|             _close_span_on_success, | ||||
|             _close_span_on_error, | ||||
|             callback_args=(result,), | ||||
|             errback_args=(result,) | ||||
|         ) | ||||
|         result.clear_callbacks() | ||||
|         return result | ||||
|     except Exception: | ||||
|         with span: | ||||
|             span.set_exc_info(*sys.exc_info()) | ||||
|         raise | ||||
| 
 | ||||
| 
 | ||||
| def _start_span_and_set_tags(pin, query, session, cluster): | ||||
|     service = pin.service | ||||
|     tracer = pin.tracer | ||||
|     span = tracer.trace('cassandra.query', service=service, span_type=SpanTypes.CASSANDRA) | ||||
|     _sanitize_query(span, query) | ||||
|     span.set_tags(_extract_session_metas(session))     # FIXME[matt] do once? | ||||
|     span.set_tags(_extract_cluster_metas(cluster)) | ||||
|     # set analytics sample rate if enabled | ||||
|     span.set_tag( | ||||
|         ANALYTICS_SAMPLE_RATE_KEY, | ||||
|         config.cassandra.get_analytics_sample_rate() | ||||
|     ) | ||||
|     return span | ||||
| 
 | ||||
| 
 | ||||
| def _extract_session_metas(session): | ||||
|     metas = {} | ||||
| 
 | ||||
|     if getattr(session, 'keyspace', None): | ||||
|         # FIXME the keyspace can be overridden explicitly in the query itself | ||||
|         # e.g. 'select * from trace.hash_to_resource' | ||||
|         metas[cassx.KEYSPACE] = session.keyspace.lower() | ||||
| 
 | ||||
|     return metas | ||||
| 
 | ||||
| 
 | ||||
| def _extract_cluster_metas(cluster): | ||||
|     metas = {} | ||||
|     if deep_getattr(cluster, 'metadata.cluster_name'): | ||||
|         metas[cassx.CLUSTER] = cluster.metadata.cluster_name | ||||
|     if getattr(cluster, 'port', None): | ||||
|         metas[net.TARGET_PORT] = cluster.port | ||||
| 
 | ||||
|     return metas | ||||
| 
 | ||||
| 
 | ||||
| def _extract_result_metas(result): | ||||
|     metas = {} | ||||
|     if result is None: | ||||
|         return metas | ||||
| 
 | ||||
|     future = getattr(result, 'response_future', None) | ||||
| 
 | ||||
|     if future: | ||||
|         # get the host | ||||
|         host = getattr(future, 'coordinator_host', None) | ||||
|         if host: | ||||
|             metas[net.TARGET_HOST] = host | ||||
|         elif hasattr(future, '_current_host'): | ||||
|             address = deep_getattr(future, '_current_host.address') | ||||
|             if address: | ||||
|                 metas[net.TARGET_HOST] = address | ||||
| 
 | ||||
|         query = getattr(future, 'query', None) | ||||
|         if getattr(query, 'consistency_level', None): | ||||
|             metas[cassx.CONSISTENCY_LEVEL] = query.consistency_level | ||||
|         if getattr(query, 'keyspace', None): | ||||
|             metas[cassx.KEYSPACE] = query.keyspace.lower() | ||||
| 
 | ||||
|         page_number = getattr(future, PAGE_NUMBER, 1) | ||||
|         has_more_pages = getattr(future, 'has_more_pages') | ||||
|         is_paginated = has_more_pages or page_number > 1 | ||||
|         metas[cassx.PAGINATED] = is_paginated | ||||
|         if is_paginated: | ||||
|             metas[cassx.PAGE_NUMBER] = page_number | ||||
| 
 | ||||
|     if hasattr(result, 'current_rows'): | ||||
|         result_rows = result.current_rows or [] | ||||
|         metas[cassx.ROW_COUNT] = len(result_rows) | ||||
| 
 | ||||
|     return metas | ||||
| 
 | ||||
| 
 | ||||
| def _sanitize_query(span, query): | ||||
|     # TODO (aaditya): fix this hacky type check. we need it to avoid circular imports | ||||
|     t = type(query).__name__ | ||||
| 
 | ||||
|     resource = None | ||||
|     if t in ('SimpleStatement', 'PreparedStatement'): | ||||
|         # reset query if a string is available | ||||
|         resource = getattr(query, 'query_string', query) | ||||
|     elif t == 'BatchStatement': | ||||
|         resource = 'BatchStatement' | ||||
|         # Each element in `_statements_and_parameters` is: | ||||
|         #   (is_prepared, statement, parameters) | ||||
|         #  ref:https://github.com/datastax/python-driver/blob/13d6d72be74f40fcef5ec0f2b3e98538b3b87459/cassandra/query.py#L844 | ||||
|         # | ||||
|         # For prepared statements, the `statement` value is just the query_id | ||||
|         #   which is not a statement and when trying to join with other strings | ||||
|         #   raises an error in python3 around joining bytes to unicode, so this | ||||
|         #   just filters out prepared statements from this tag value | ||||
|         q = '; '.join(q[1] for q in query._statements_and_parameters[:2] if not q[0]) | ||||
|         span.set_tag('cassandra.query', q) | ||||
|         span.set_metric('cassandra.batch_size', len(query._statements_and_parameters)) | ||||
|     elif t == 'BoundStatement': | ||||
|         ps = getattr(query, 'prepared_statement', None) | ||||
|         if ps: | ||||
|             resource = getattr(ps, 'query_string', None) | ||||
|     elif t == 'str': | ||||
|         resource = query | ||||
|     else: | ||||
|         resource = 'unknown-query-type'  # FIXME[matt] what else do to here? | ||||
| 
 | ||||
|     span.resource = stringify(resource)[:RESOURCE_MAX_LENGTH] | ||||
| 
 | ||||
| 
 | ||||
| # | ||||
| # DEPRECATED | ||||
| # | ||||
| 
 | ||||
| @deprecated(message='Use patching instead (see the docs).', version='1.0.0') | ||||
| def get_traced_cassandra(*args, **kwargs): | ||||
|     return _get_traced_cluster(*args, **kwargs) | ||||
| 
 | ||||
| 
 | ||||
| def _get_traced_cluster(*args, **kwargs): | ||||
|     return cassandra.cluster.Cluster | ||||
|  | @ -0,0 +1,54 @@ | |||
| """ | ||||
| The Celery integration will trace all tasks that are executed in the | ||||
| background. Functions and class-based tasks are traced only if the Celery API | ||||
| is used, so calling the function directly or via the ``run()`` method will not | ||||
| generate traces. However, calling ``apply()``, ``apply_async()`` and ``delay()`` | ||||
| will produce tracing data. To trace your Celery application, call the patch method:: | ||||
| 
 | ||||
|     import celery | ||||
|     from ddtrace import patch | ||||
| 
 | ||||
|     patch(celery=True) | ||||
|     app = celery.Celery() | ||||
| 
 | ||||
|     @app.task | ||||
|     def my_task(): | ||||
|         pass | ||||
| 
 | ||||
|     class MyTask(app.Task): | ||||
|         def run(self): | ||||
|             pass | ||||
| 
 | ||||
| 
 | ||||
| To change the Celery service names, you can use the ``Config`` API as follows:: | ||||
| 
 | ||||
|     from ddtrace import config | ||||
| 
 | ||||
|     # change service names for producers and workers | ||||
|     config.celery['producer_service_name'] = 'task-queue' | ||||
|     config.celery['worker_service_name'] = 'worker-notify' | ||||
| 
 | ||||
| By default, reported service names are: | ||||
|     * ``celery-producer`` when tasks are enqueued for processing | ||||
|     * ``celery-worker`` when tasks are processed by a Celery process | ||||
| 
 | ||||
| """ | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| 
 | ||||
| required_modules = ['celery'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .app import patch_app, unpatch_app | ||||
|         from .patch import patch, unpatch | ||||
|         from .task import patch_task, unpatch_task | ||||
| 
 | ||||
|         __all__ = [ | ||||
|             'patch', | ||||
|             'patch_app', | ||||
|             'patch_task', | ||||
|             'unpatch', | ||||
|             'unpatch_app', | ||||
|             'unpatch_task', | ||||
|         ] | ||||
|  | @ -0,0 +1,60 @@ | |||
| from celery import signals | ||||
| 
 | ||||
| from ddtrace import Pin, config | ||||
| from ddtrace.pin import _DD_PIN_NAME | ||||
| 
 | ||||
| from .constants import APP | ||||
| from .signals import ( | ||||
|     trace_prerun, | ||||
|     trace_postrun, | ||||
|     trace_before_publish, | ||||
|     trace_after_publish, | ||||
|     trace_failure, | ||||
|     trace_retry, | ||||
| ) | ||||
| 
 | ||||
| 
 | ||||
| def patch_app(app, pin=None): | ||||
|     """Attach the Pin class to the application and connect | ||||
|     our handlers to Celery signals. | ||||
|     """ | ||||
|     if getattr(app, '__datadog_patch', False): | ||||
|         return | ||||
|     setattr(app, '__datadog_patch', True) | ||||
| 
 | ||||
|     # attach the PIN object | ||||
|     pin = pin or Pin( | ||||
|         service=config.celery['worker_service_name'], | ||||
|         app=APP, | ||||
|         _config=config.celery, | ||||
|     ) | ||||
|     pin.onto(app) | ||||
|     # connect to the Signal framework | ||||
| 
 | ||||
|     signals.task_prerun.connect(trace_prerun, weak=False) | ||||
|     signals.task_postrun.connect(trace_postrun, weak=False) | ||||
|     signals.before_task_publish.connect(trace_before_publish, weak=False) | ||||
|     signals.after_task_publish.connect(trace_after_publish, weak=False) | ||||
|     signals.task_failure.connect(trace_failure, weak=False) | ||||
|     signals.task_retry.connect(trace_retry, weak=False) | ||||
|     return app | ||||
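| 
 | ||||
| # Hedged usage sketch: patch_app can also be applied to a single application | ||||
| # instance instead of patching the Celery class globally: | ||||
| # | ||||
| #     from celery import Celery | ||||
| #     from ddtrace.contrib.celery import patch_app | ||||
| # | ||||
| #     app = patch_app(Celery()) | ||||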
| 
 | ||||
| 
 | ||||
| def unpatch_app(app): | ||||
|     """Remove the Pin instance from the application and disconnect | ||||
|     our handlers from Celery signal framework. | ||||
|     """ | ||||
|     if not getattr(app, '__datadog_patch', False): | ||||
|         return | ||||
|     setattr(app, '__datadog_patch', False) | ||||
| 
 | ||||
|     pin = Pin.get_from(app) | ||||
|     if pin is not None: | ||||
|         delattr(app, _DD_PIN_NAME) | ||||
| 
 | ||||
|     signals.task_prerun.disconnect(trace_prerun) | ||||
|     signals.task_postrun.disconnect(trace_postrun) | ||||
|     signals.before_task_publish.disconnect(trace_before_publish) | ||||
|     signals.after_task_publish.disconnect(trace_after_publish) | ||||
|     signals.task_failure.disconnect(trace_failure) | ||||
|     signals.task_retry.disconnect(trace_retry) | ||||
|  | @ -0,0 +1,22 @@ | |||
| from os import getenv | ||||
| 
 | ||||
| # Celery Context key | ||||
| CTX_KEY = '__dd_task_span' | ||||
| 
 | ||||
| # Span names | ||||
| PRODUCER_ROOT_SPAN = 'celery.apply' | ||||
| WORKER_ROOT_SPAN = 'celery.run' | ||||
| 
 | ||||
| # Task operations | ||||
| TASK_TAG_KEY = 'celery.action' | ||||
| TASK_APPLY = 'apply' | ||||
| TASK_APPLY_ASYNC = 'apply_async' | ||||
| TASK_RUN = 'run' | ||||
| TASK_RETRY_REASON_KEY = 'celery.retry.reason' | ||||
| 
 | ||||
| # Service info | ||||
| APP = 'celery' | ||||
| # `getenv()` call must be kept for backward compatibility; we may remove it | ||||
| # later when we do a full migration to the `Config` class | ||||
| PRODUCER_SERVICE = getenv('DATADOG_SERVICE_NAME') or 'celery-producer' | ||||
| WORKER_SERVICE = getenv('DATADOG_SERVICE_NAME') or 'celery-worker' | ||||
|  | @ -0,0 +1,28 @@ | |||
| import celery | ||||
| 
 | ||||
| from ddtrace import config | ||||
| 
 | ||||
| from .app import patch_app, unpatch_app | ||||
| from .constants import PRODUCER_SERVICE, WORKER_SERVICE | ||||
| from ...utils.formats import get_env | ||||
| 
 | ||||
| 
 | ||||
| # Celery default settings | ||||
| config._add('celery', { | ||||
|     'producer_service_name': get_env('celery', 'producer_service_name', PRODUCER_SERVICE), | ||||
|     'worker_service_name': get_env('celery', 'worker_service_name', WORKER_SERVICE), | ||||
| }) | ||||
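| 
 | ||||
| # Assumption note: get_env('celery', '<name>', default) is expected to read | ||||
| # an environment variable of the form DD_CELERY_<NAME> (upper-cased), | ||||
| # falling back to the legacy DATADOG_SERVICE_NAME-derived defaults | ||||
| # imported above. | ||||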
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     """Instrument Celery base application and the `TaskRegistry` so | ||||
|     that any newly registered task is automatically instrumented. In the | ||||
|     case of the Django-Celery integration, the `@shared_task` decorator | ||||
|     must also be instrumented because Django doesn't use the Celery registry. | ||||
|     """ | ||||
|     patch_app(celery.Celery) | ||||
| 
 | ||||
| 
 | ||||
| def unpatch(): | ||||
|     """Disconnect all signals and remove Tracing capabilities""" | ||||
|     unpatch_app(celery.Celery) | ||||
|  | @ -0,0 +1,154 @@ | |||
| from ddtrace import Pin, config | ||||
| 
 | ||||
| from celery import registry | ||||
| 
 | ||||
| from ...ext import SpanTypes | ||||
| from ...internal.logger import get_logger | ||||
| from . import constants as c | ||||
| from .utils import tags_from_context, retrieve_task_id, attach_span, detach_span, retrieve_span | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| 
 | ||||
| def trace_prerun(*args, **kwargs): | ||||
|     # safeguard to avoid crashes in case the signals API | ||||
|     # changes in Celery | ||||
|     task = kwargs.get('sender') | ||||
|     task_id = kwargs.get('task_id') | ||||
|     log.debug('prerun signal start task_id=%s', task_id) | ||||
|     if task is None or task_id is None: | ||||
|         log.debug('unable to extract the Task and the task_id. This version of Celery may not be supported.') | ||||
|         return | ||||
| 
 | ||||
|     # retrieve the task Pin or fallback to the global one | ||||
|     pin = Pin.get_from(task) or Pin.get_from(task.app) | ||||
|     if pin is None: | ||||
|         log.debug('no pin found on task or task.app task_id=%s', task_id) | ||||
|         return | ||||
| 
 | ||||
|     # propagate the `Span` in the current task Context | ||||
|     service = config.celery['worker_service_name'] | ||||
|     span = pin.tracer.trace(c.WORKER_ROOT_SPAN, service=service, resource=task.name, span_type=SpanTypes.WORKER) | ||||
|     attach_span(task, task_id, span) | ||||
| 
 | ||||
| 
 | ||||
| def trace_postrun(*args, **kwargs): | ||||
|     # safeguard to avoid crashes in case the signals API | ||||
|     # changes in Celery | ||||
|     task = kwargs.get('sender') | ||||
|     task_id = kwargs.get('task_id') | ||||
|     log.debug('postrun signal task_id=%s', task_id) | ||||
|     if task is None or task_id is None: | ||||
|         log.debug('unable to extract the Task and the task_id. This version of Celery may not be supported.') | ||||
|         return | ||||
| 
 | ||||
|     # retrieve and finish the Span | ||||
|     span = retrieve_span(task, task_id) | ||||
|     if span is None: | ||||
|         log.warning('no existing span found for task_id=%s', task_id) | ||||
|         return | ||||
|     else: | ||||
|         # request context tags | ||||
|         span.set_tag(c.TASK_TAG_KEY, c.TASK_RUN) | ||||
|         span.set_tags(tags_from_context(kwargs)) | ||||
|         span.set_tags(tags_from_context(task.request)) | ||||
|         span.finish() | ||||
|         detach_span(task, task_id) | ||||
| 
 | ||||
| 
 | ||||
| def trace_before_publish(*args, **kwargs): | ||||
|     # `before_task_publish` signal doesn't propagate the task instance so | ||||
|     # we need to retrieve it from the Celery Registry to access the `Pin`. The | ||||
|     # `Task` instance **does not** include any information about the current | ||||
|     # execution, so it **must not** be used to retrieve `request` data. | ||||
|     task_name = kwargs.get('sender') | ||||
|     task = registry.tasks.get(task_name) | ||||
|     task_id = retrieve_task_id(kwargs) | ||||
|     # safeguard to avoid crashes in case the signals API | ||||
|     # changes in Celery | ||||
|     if task is None or task_id is None: | ||||
|         log.debug('unable to extract the Task and the task_id. This version of Celery may not be supported.') | ||||
|         return | ||||
| 
 | ||||
|     # propagate the `Span` in the current task Context | ||||
|     pin = Pin.get_from(task) or Pin.get_from(task.app) | ||||
|     if pin is None: | ||||
|         return | ||||
| 
 | ||||
|     # apply some tags here because most of the data is not available | ||||
|     # in the task_after_publish signal | ||||
|     service = config.celery['producer_service_name'] | ||||
|     span = pin.tracer.trace(c.PRODUCER_ROOT_SPAN, service=service, resource=task_name) | ||||
|     span.set_tag(c.TASK_TAG_KEY, c.TASK_APPLY_ASYNC) | ||||
|     span.set_tag('celery.id', task_id) | ||||
|     span.set_tags(tags_from_context(kwargs)) | ||||
|     # Note: adding tags from `traceback` or `state` calls will make an | ||||
|     # API call to the backend for the properties so we should rely | ||||
|     # only on the given `Context` | ||||
|     attach_span(task, task_id, span, is_publish=True) | ||||
| 
 | ||||
| 
 | ||||
| def trace_after_publish(*args, **kwargs): | ||||
|     task_name = kwargs.get('sender') | ||||
|     task = registry.tasks.get(task_name) | ||||
|     task_id = retrieve_task_id(kwargs) | ||||
|     # safeguard to avoid crashes in case the signals API | ||||
|     # changes in Celery | ||||
|     if task is None or task_id is None: | ||||
|         log.debug('unable to extract the Task and the task_id. This version of Celery may not be supported.') | ||||
|         return | ||||
| 
 | ||||
|     # retrieve and finish the Span | ||||
|     span = retrieve_span(task, task_id, is_publish=True) | ||||
|     if span is None: | ||||
|         return | ||||
|     else: | ||||
|         span.finish() | ||||
|         detach_span(task, task_id, is_publish=True) | ||||
| 
 | ||||
| 
 | ||||
| def trace_failure(*args, **kwargs): | ||||
|     # safeguard to avoid crashes in case the signals API | ||||
|     # changes in Celery | ||||
|     task = kwargs.get('sender') | ||||
|     task_id = kwargs.get('task_id') | ||||
|     if task is None or task_id is None: | ||||
|         log.debug('unable to extract the Task and the task_id. This version of Celery may not be supported.') | ||||
|         return | ||||
| 
 | ||||
|     # retrieve and finish the Span | ||||
|     span = retrieve_span(task, task_id) | ||||
|     if span is None: | ||||
|         return | ||||
|     else: | ||||
|         # add Exception tags; post signals are still called | ||||
|         # so we don't need to attach other tags here | ||||
|         ex = kwargs.get('einfo') | ||||
|         if ex is None: | ||||
|             return | ||||
|         if hasattr(task, 'throws') and isinstance(ex.exception, task.throws): | ||||
|             return | ||||
|         span.set_exc_info(ex.type, ex.exception, ex.tb) | ||||
| 
 | ||||
| 
 | ||||
| def trace_retry(*args, **kwargs): | ||||
|     # safeguard to avoid crashes in case the signals API | ||||
|     # changes in Celery | ||||
|     task = kwargs.get('sender') | ||||
|     context = kwargs.get('request') | ||||
|     if task is None or context is None: | ||||
|         log.debug('unable to extract the Task or the Context. This version of Celery may not be supported.') | ||||
|         return | ||||
| 
 | ||||
|     reason = kwargs.get('reason') | ||||
|     if not reason: | ||||
|         log.debug('unable to extract the retry reason. This version of Celery may not be supported.') | ||||
|         return | ||||
| 
 | ||||
|     span = retrieve_span(task, context.id) | ||||
|     if span is None: | ||||
|         return | ||||
| 
 | ||||
|     # Add retry reason metadata to span | ||||
|     # DEV: Use `str(reason)` instead of `reason.message` in case we get something that isn't an `Exception` | ||||
|     span.set_tag(c.TASK_RETRY_REASON_KEY, str(reason)) | ||||
|  | @ -0,0 +1,32 @@ | |||
| from .app import patch_app | ||||
| 
 | ||||
| from ...utils.deprecation import deprecation | ||||
| 
 | ||||
| 
 | ||||
| def patch_task(task, pin=None): | ||||
|     """Deprecated API. The new API uses signals that can be activated via | ||||
|     patch(celery=True) or through the `ddtrace-run` script. Using this API | ||||
|     enables instrumentation on all tasks. | ||||
|     """ | ||||
|     deprecation( | ||||
|         name='ddtrace.contrib.celery.patch_task', | ||||
|         message='Use `patch(celery=True)` or `ddtrace-run` script instead', | ||||
|         version='1.0.0', | ||||
|     ) | ||||
| 
 | ||||
|     # Enable instrumentation everywhere | ||||
|     patch_app(task.app) | ||||
|     return task | ||||
| 
 | ||||
| 
 | ||||
| def unpatch_task(task): | ||||
|     """Deprecated API. The new API uses signals that can be deactivated | ||||
|     via the unpatch() API. This API is now a no-op implementation so it doesn't | ||||
|     affect instrumented tasks. | ||||
|     """ | ||||
|     deprecation( | ||||
|         name='ddtrace.contrib.celery.patch_task', | ||||
|         message='Use `unpatch()` instead', | ||||
|         version='1.0.0', | ||||
|     ) | ||||
|     return task | ||||
|  | @ -0,0 +1,106 @@ | |||
| from weakref import WeakValueDictionary | ||||
| 
 | ||||
| from .constants import CTX_KEY | ||||
| 
 | ||||
| 
 | ||||
| def tags_from_context(context): | ||||
|     """Helper to extract meta values from a Celery Context""" | ||||
|     tag_keys = ( | ||||
|         'compression', 'correlation_id', 'countdown', 'delivery_info', 'eta', | ||||
|         'exchange', 'expires', 'hostname', 'id', 'priority', 'queue', 'reply_to', | ||||
|         'retries', 'routing_key', 'serializer', 'timelimit', 'origin', 'state', | ||||
|     ) | ||||
| 
 | ||||
|     tags = {} | ||||
|     for key in tag_keys: | ||||
|         value = context.get(key) | ||||
| 
 | ||||
|         # Skip this key if it is not set | ||||
|         if value is None or value == '': | ||||
|             continue | ||||
| 
 | ||||
|         # Skip `timelimit` if it is not set (its default/unset value is a | ||||
|         # tuple or a list of `None` values) | ||||
|         if key == 'timelimit' and value in [(None, None), [None, None]]: | ||||
|             continue | ||||
| 
 | ||||
|         # Skip `retries` if its value is `0` | ||||
|         if key == 'retries' and value == 0: | ||||
|             continue | ||||
| 
 | ||||
|         # Celery 4.0 uses `origin` instead of `hostname`; this mapping preserves | ||||
|         # the same tag name regardless of the Celery version | ||||
|         if key == 'origin': | ||||
|             key = 'hostname' | ||||
| 
 | ||||
|         # prefix the tag as 'celery' | ||||
|         tag_name = 'celery.{}'.format(key) | ||||
|         tags[tag_name] = value | ||||
|     return tags | ||||
| 
 | ||||
| 
 | ||||
| def attach_span(task, task_id, span, is_publish=False): | ||||
|     """Helper to propagate a `Span` for the given `Task` instance. This | ||||
|     function uses a `WeakValueDictionary` that stores a Datadog Span using | ||||
|     `(task_id, is_publish)` as the key. This is useful when information must be | ||||
|     propagated from one Celery signal to another. | ||||
| 
 | ||||
|     DEV: We use (task_id, is_publish) for the key to ensure that publishing a | ||||
|          task from within another task does not cause any conflicts. | ||||
| 
 | ||||
|          This mostly happens when either a task fails and a retry policy is in place, | ||||
|          or when a task is manually retried (e.g. `task.retry()`); in both cases we | ||||
|          end up trying to publish a task with the same id as the task currently running. | ||||
| 
 | ||||
|          Previously publishing the new task would overwrite the existing `celery.run` span | ||||
|          in the `weak_dict` causing that span to be forgotten and never finished. | ||||
| 
 | ||||
|          NOTE: We cannot test for this well yet, because we do not run a celery worker, | ||||
|          and cannot run `task.apply_async()` | ||||
|     """ | ||||
|     weak_dict = getattr(task, CTX_KEY, None) | ||||
|     if weak_dict is None: | ||||
|         weak_dict = WeakValueDictionary() | ||||
|         setattr(task, CTX_KEY, weak_dict) | ||||
| 
 | ||||
|     weak_dict[(task_id, is_publish)] = span | ||||
| 
 | ||||
| 
 | ||||
| def detach_span(task, task_id, is_publish=False): | ||||
|     """Helper to remove a `Span` in a Celery task when it's propagated. | ||||
|     This function handles tasks where the `Span` is not attached. | ||||
|     """ | ||||
|     weak_dict = getattr(task, CTX_KEY, None) | ||||
|     if weak_dict is None: | ||||
|         return | ||||
| 
 | ||||
|     # DEV: See note in `attach_span` for key info | ||||
|     weak_dict.pop((task_id, is_publish), None) | ||||
| 
 | ||||
| 
 | ||||
| def retrieve_span(task, task_id, is_publish=False): | ||||
|     """Helper to retrieve an active `Span` stored in a `Task` | ||||
|     instance | ||||
|     """ | ||||
|     weak_dict = getattr(task, CTX_KEY, None) | ||||
|     if weak_dict is None: | ||||
|         return | ||||
|     else: | ||||
|         # DEV: See note in `attach_span` for key info | ||||
|         return weak_dict.get((task_id, is_publish)) | ||||
| 
 | ||||
| 
 | ||||
| def retrieve_task_id(context): | ||||
|     """Helper to retrieve the `Task` identifier from the message `body`. | ||||
|     This helper supports Protocol Version 1 and 2. The Protocol is well | ||||
|     detailed in the official documentation: | ||||
|     http://docs.celeryproject.org/en/latest/internals/protocol.html | ||||
|     """ | ||||
|     headers = context.get('headers') | ||||
|     body = context.get('body') | ||||
|     if headers: | ||||
|         # Protocol Version 2 (default from Celery 4.0) | ||||
|         return headers.get('id') | ||||
|     else: | ||||
|         # Protocol Version 1 | ||||
|         return body.get('id') | ||||
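| 
 | ||||
| 
 | ||||
| # Illustrative message shapes (simplified from the Celery protocol docs): | ||||
| #   v2: {'headers': {'id': 'abc'}, 'body': ...}    -> id read from headers | ||||
| #   v1: {'headers': {}, 'body': {'id': 'abc'}}     -> id read from body | ||||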
|  | @ -0,0 +1,29 @@ | |||
| """Instrument Consul to trace KV queries. | ||||
| 
 | ||||
| Only supports tracing for the synchronous client. | ||||
| 
 | ||||
| ``patch_all`` will automatically patch your Consul client to make it work. | ||||
| :: | ||||
| 
 | ||||
|     from ddtrace import Pin, patch | ||||
|     import consul | ||||
| 
 | ||||
|     # If not patched yet, you can patch consul specifically | ||||
|     patch(consul=True) | ||||
| 
 | ||||
|     # This will report a span with the default settings | ||||
|     client = consul.Consul(host="127.0.0.1", port=8500) | ||||
|     client.get("my-key") | ||||
| 
 | ||||
|     # Use a pin to specify metadata related to this client | ||||
|     Pin.override(client, service='consul-kv') | ||||
| """ | ||||
| 
 | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| required_modules = ['consul'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .patch import patch, unpatch | ||||
|         __all__ = ['patch', 'unpatch'] | ||||
|  | @ -0,0 +1,57 @@ | |||
| import consul | ||||
| 
 | ||||
| from ddtrace.vendor.wrapt import wrap_function_wrapper as _w | ||||
| 
 | ||||
| from ddtrace import config | ||||
| from ...constants import ANALYTICS_SAMPLE_RATE_KEY | ||||
| from ...ext import consul as consulx | ||||
| from ...pin import Pin | ||||
| from ...utils.wrappers import unwrap as _u | ||||
| 
 | ||||
| 
 | ||||
| _KV_FUNCS = ['put', 'get', 'delete'] | ||||
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     if getattr(consul, '__datadog_patch', False): | ||||
|         return | ||||
|     setattr(consul, '__datadog_patch', True) | ||||
| 
 | ||||
|     pin = Pin(service=consulx.SERVICE, app=consulx.APP) | ||||
|     pin.onto(consul.Consul.KV) | ||||
| 
 | ||||
|     for f_name in _KV_FUNCS: | ||||
|         _w('consul', 'Consul.KV.%s' % f_name, wrap_function(f_name)) | ||||
| 
 | ||||
| 
 | ||||
| def unpatch(): | ||||
|     if not getattr(consul, '__datadog_patch', False): | ||||
|         return | ||||
|     setattr(consul, '__datadog_patch', False) | ||||
| 
 | ||||
|     for f_name in _KV_FUNCS: | ||||
|         _u(consul.Consul.KV, f_name) | ||||
| 
 | ||||
| 
 | ||||
| def wrap_function(name): | ||||
|     def trace_func(wrapped, instance, args, kwargs): | ||||
|         pin = Pin.get_from(instance) | ||||
|         if not pin or not pin.enabled(): | ||||
|             return wrapped(*args, **kwargs) | ||||
| 
 | ||||
|         # Only trace the synchronous implementation | ||||
|         if not isinstance(instance.agent.http, consul.std.HTTPClient): | ||||
|             return wrapped(*args, **kwargs) | ||||
| 
 | ||||
|         path = kwargs.get('key') or args[0] | ||||
|         resource = name.upper() | ||||
| 
 | ||||
|         with pin.tracer.trace(consulx.CMD, service=pin.service, resource=resource) as span: | ||||
|             rate = config.consul.get_analytics_sample_rate() | ||||
|             if rate is not None: | ||||
|                 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, rate) | ||||
|             span.set_tag(consulx.KEY, path) | ||||
|             span.set_tag(consulx.CMD, resource) | ||||
|             return wrapped(*args, **kwargs) | ||||
| 
 | ||||
|     return trace_func | ||||
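| 
 | ||||
| 
 | ||||
| # Illustrative span (assuming `consulx.CMD` / `consulx.KEY` resolve to | ||||
| # 'consul.command' / 'consul.key'): client.kv.get('my-key') would be reported | ||||
| # as a 'consul.command' span with resource 'GET' and tag consul.key = 'my-key'. | ||||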
|  | @ -0,0 +1,204 @@ | |||
| """ | ||||
| Generic dbapi tracing code. | ||||
| """ | ||||
| 
 | ||||
| from ...constants import ANALYTICS_SAMPLE_RATE_KEY | ||||
| from ...ext import SpanTypes, sql | ||||
| from ...internal.logger import get_logger | ||||
| from ...pin import Pin | ||||
| from ...settings import config | ||||
| from ...utils.formats import asbool, get_env | ||||
| from ...vendor import wrapt | ||||
| 
 | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| config._add('dbapi2', dict( | ||||
|     trace_fetch_methods=asbool(get_env('dbapi2', 'trace_fetch_methods', 'false')), | ||||
| )) | ||||
| 
 | ||||
| 
 | ||||
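| # A minimal sketch of flipping the `trace_fetch_methods` toggle above (assuming | ||||
| # `get_env` maps to the DD_-prefixed environment variable, i.e. | ||||
| # DD_DBAPI2_TRACE_FETCH_METHODS=true), or programmatically before any | ||||
| # connection is wrapped: | ||||
| # | ||||
| #   from ddtrace import config | ||||
| #   config.dbapi2['trace_fetch_methods'] = True | ||||
| 
 | ||||
| 
 | ||||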
| class TracedCursor(wrapt.ObjectProxy): | ||||
|     """ TracedCursor wraps a psql cursor and traces it's queries. """ | ||||
| 
 | ||||
|     def __init__(self, cursor, pin): | ||||
|         super(TracedCursor, self).__init__(cursor) | ||||
|         pin.onto(self) | ||||
|         name = pin.app or 'sql' | ||||
|         self._self_datadog_name = '{}.query'.format(name) | ||||
|         self._self_last_execute_operation = None | ||||
| 
 | ||||
|     def _trace_method(self, method, name, resource, extra_tags, *args, **kwargs): | ||||
|         """ | ||||
|         Internal function to trace the call to the underlying cursor method | ||||
|         :param method: The callable to be wrapped | ||||
|         :param name: The name of the resulting span. | ||||
|         :param resource: The SQL query. SQL queries are obfuscated on the agent side. | ||||
|         :param extra_tags: A dict of tags to store into the span's meta | ||||
|         :param args: The args that will be passed as positional args to the wrapped method | ||||
|         :param kwargs: The args that will be passed as kwargs to the wrapped method | ||||
|         :return: The result of the wrapped method invocation | ||||
|         """ | ||||
|         pin = Pin.get_from(self) | ||||
|         if not pin or not pin.enabled(): | ||||
|             return method(*args, **kwargs) | ||||
|         service = pin.service | ||||
|         with pin.tracer.trace(name, service=service, resource=resource, span_type=SpanTypes.SQL) as s: | ||||
|             # No reason to tag the query since it is set as the resource by the agent. See: | ||||
|             # https://github.com/DataDog/datadog-trace-agent/blob/bda1ebbf170dd8c5879be993bdd4dbae70d10fda/obfuscate/sql.go#L232 | ||||
|             s.set_tags(pin.tags) | ||||
|             s.set_tags(extra_tags) | ||||
| 
 | ||||
|             # set analytics sample rate if enabled but only for non-FetchTracedCursor | ||||
|             if not isinstance(self, FetchTracedCursor): | ||||
|                 s.set_tag( | ||||
|                     ANALYTICS_SAMPLE_RATE_KEY, | ||||
|                     config.dbapi2.get_analytics_sample_rate() | ||||
|                 ) | ||||
| 
 | ||||
|             try: | ||||
|                 return method(*args, **kwargs) | ||||
|             finally: | ||||
|                 row_count = self.__wrapped__.rowcount | ||||
|                 s.set_metric('db.rowcount', row_count) | ||||
|                 # Necessary for Django integration backward compatibility. The Django integration used to provide | ||||
|                 # its own implementation of TracedCursor, which stored the row count in a tag instead of | ||||
|                 # a metric. That custom implementation has been replaced by this generic dbapi implementation, | ||||
|                 # so the tag is set here to preserve it. | ||||
|                 if row_count and row_count >= 0: | ||||
|                     s.set_tag(sql.ROWS, row_count) | ||||
| 
 | ||||
|     def executemany(self, query, *args, **kwargs): | ||||
|         """ Wraps the cursor.executemany method""" | ||||
|         self._self_last_execute_operation = query | ||||
|         # Always return the result as-is | ||||
|         # DEV: Some libraries return `None`, others `int`, and others the cursor objects | ||||
|         #      These differences should be overridden at the integration-specific layer (e.g. in `sqlite3/patch.py`) | ||||
|         # FIXME[matt] properly handle kwargs here. arg names can be different | ||||
|         # with different libs. | ||||
|         return self._trace_method( | ||||
|             self.__wrapped__.executemany, self._self_datadog_name, query, {'sql.executemany': 'true'}, | ||||
|             query, *args, **kwargs) | ||||
| 
 | ||||
|     def execute(self, query, *args, **kwargs): | ||||
|         """ Wraps the cursor.execute method""" | ||||
|         self._self_last_execute_operation = query | ||||
| 
 | ||||
|         # Always return the result as-is | ||||
|         # DEV: Some libraries return `None`, others `int`, and others the cursor objects | ||||
|         #      These differences should be overridden at the integration-specific layer (e.g. in `sqlite3/patch.py`) | ||||
|         return self._trace_method(self.__wrapped__.execute, self._self_datadog_name, query, {}, query, *args, **kwargs) | ||||
| 
 | ||||
|     def callproc(self, proc, args): | ||||
|         """ Wraps the cursor.callproc method""" | ||||
|         self._self_last_execute_operation = proc | ||||
|         return self._trace_method(self.__wrapped__.callproc, self._self_datadog_name, proc, {}, proc, args) | ||||
| 
 | ||||
|     def __enter__(self): | ||||
|         # Previous versions of the dbapi didn't support context managers; let's | ||||
|         # reference the func that would be called to ensure that error | ||||
|         # messages will be the same. | ||||
|         self.__wrapped__.__enter__ | ||||
| 
 | ||||
|         # and finally, yield the traced cursor. | ||||
|         return self | ||||
| 
 | ||||
| 
 | ||||
| class FetchTracedCursor(TracedCursor): | ||||
|     """ | ||||
|     Sub-class of :class:`TracedCursor` that also instruments `fetchone`, `fetchall`, and `fetchmany` methods. | ||||
| 
 | ||||
|     We do not trace these functions by default since they can get very noisy (e.g. `fetchone` with 100k rows). | ||||
|     """ | ||||
|     def fetchone(self, *args, **kwargs): | ||||
|         """ Wraps the cursor.fetchone method""" | ||||
|         span_name = '{}.{}'.format(self._self_datadog_name, 'fetchone') | ||||
|         return self._trace_method(self.__wrapped__.fetchone, span_name, self._self_last_execute_operation, {}, | ||||
|                                   *args, **kwargs) | ||||
| 
 | ||||
|     def fetchall(self, *args, **kwargs): | ||||
|         """ Wraps the cursor.fetchall method""" | ||||
|         span_name = '{}.{}'.format(self._self_datadog_name, 'fetchall') | ||||
|         return self._trace_method(self.__wrapped__.fetchall, span_name, self._self_last_execute_operation, {}, | ||||
|                                   *args, **kwargs) | ||||
| 
 | ||||
|     def fetchmany(self, *args, **kwargs): | ||||
|         """ Wraps the cursor.fetchmany method""" | ||||
|         span_name = '{}.{}'.format(self._self_datadog_name, 'fetchmany') | ||||
|         # We want to trace the information about how many rows were requested. Note that this number may be larger | ||||
|         # than the number of rows actually returned if fewer than requested are available from the query. | ||||
|         size_tag_key = 'db.fetch.size' | ||||
|         if 'size' in kwargs: | ||||
|             extra_tags = {size_tag_key: kwargs.get('size')} | ||||
|         elif len(args) == 1 and isinstance(args[0], int): | ||||
|             extra_tags = {size_tag_key: args[0]} | ||||
|         else: | ||||
|             default_array_size = getattr(self.__wrapped__, 'arraysize', None) | ||||
|             extra_tags = {size_tag_key: default_array_size} if default_array_size else {} | ||||
| 
 | ||||
|         return self._trace_method(self.__wrapped__.fetchmany, span_name, self._self_last_execute_operation, extra_tags, | ||||
|                                   *args, **kwargs) | ||||
| 
 | ||||
| 
 | ||||
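| # Illustrative tagging for the `fetchmany` branches above: cursor.fetchmany(50) | ||||
| # and cursor.fetchmany(size=50) both tag db.fetch.size = 50; with no argument | ||||
| # the cursor's own `arraysize` (when set) is reported instead. | ||||
| 
 | ||||
| 
 | ||||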
| class TracedConnection(wrapt.ObjectProxy): | ||||
|     """ TracedConnection wraps a Connection with tracing code. """ | ||||
| 
 | ||||
|     def __init__(self, conn, pin=None, cursor_cls=None): | ||||
|         # Set default cursor class if one was not provided | ||||
|         if not cursor_cls: | ||||
|             # Do not trace `fetch*` methods by default | ||||
|             cursor_cls = TracedCursor | ||||
|             if config.dbapi2.trace_fetch_methods: | ||||
|                 cursor_cls = FetchTracedCursor | ||||
| 
 | ||||
|         super(TracedConnection, self).__init__(conn) | ||||
|         name = _get_vendor(conn) | ||||
|         self._self_datadog_name = '{}.connection'.format(name) | ||||
|         db_pin = pin or Pin(service=name, app=name) | ||||
|         db_pin.onto(self) | ||||
|         # wrapt requires prefix of `_self` for attributes that are only in the | ||||
|         # proxy (since some of our source objects will use `__slots__`) | ||||
|         self._self_cursor_cls = cursor_cls | ||||
| 
 | ||||
|     def _trace_method(self, method, name, extra_tags, *args, **kwargs): | ||||
|         pin = Pin.get_from(self) | ||||
|         if not pin or not pin.enabled(): | ||||
|             return method(*args, **kwargs) | ||||
|         service = pin.service | ||||
| 
 | ||||
|         with pin.tracer.trace(name, service=service) as s: | ||||
|             s.set_tags(pin.tags) | ||||
|             s.set_tags(extra_tags) | ||||
| 
 | ||||
|             return method(*args, **kwargs) | ||||
| 
 | ||||
|     def cursor(self, *args, **kwargs): | ||||
|         cursor = self.__wrapped__.cursor(*args, **kwargs) | ||||
|         pin = Pin.get_from(self) | ||||
|         if not pin: | ||||
|             return cursor | ||||
|         return self._self_cursor_cls(cursor, pin) | ||||
| 
 | ||||
|     def commit(self, *args, **kwargs): | ||||
|         span_name = '{}.{}'.format(self._self_datadog_name, 'commit') | ||||
|         return self._trace_method(self.__wrapped__.commit, span_name, {}, *args, **kwargs) | ||||
| 
 | ||||
|     def rollback(self, *args, **kwargs): | ||||
|         span_name = '{}.{}'.format(self._self_datadog_name, 'rollback') | ||||
|         return self._trace_method(self.__wrapped__.rollback, span_name, {}, *args, **kwargs) | ||||
| 
 | ||||
| 
 | ||||
| def _get_vendor(conn): | ||||
|     """ Return the vendor (e.g postgres, mysql) of the given | ||||
|         database. | ||||
|     """ | ||||
|     try: | ||||
|         name = _get_module_name(conn) | ||||
|     except Exception: | ||||
|         log.debug('could not parse module name', exc_info=True) | ||||
|         name = 'sql' | ||||
|     return sql.normalize_vendor(name) | ||||
| 
 | ||||
| 
 | ||||
| def _get_module_name(conn): | ||||
|     return conn.__class__.__module__.split('.')[0] | ||||
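| 
 | ||||
| 
 | ||||
| # A minimal usage sketch (sqlite3 chosen arbitrarily; service/app names are | ||||
| # derived from the connection module as shown in `_get_vendor` above): | ||||
| # | ||||
| #   import sqlite3 | ||||
| #   conn = TracedConnection(sqlite3.connect(':memory:')) | ||||
| #   cur = conn.cursor()        # a TracedCursor (or FetchTracedCursor) | ||||
| #   cur.execute('SELECT 1')    # traced with the query as the span resource | ||||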
|  | @ -0,0 +1,101 @@ | |||
| """ | ||||
| The Django integration will trace users requests, template renderers, database and cache | ||||
| calls. | ||||
| 
 | ||||
| **Note:** by default the tracer is **disabled** (will not send spans) when | ||||
| the Django setting ``DEBUG`` is ``True``. This can be overridden by explicitly enabling | ||||
| the tracer with ``DATADOG_TRACE['ENABLED'] = True``, as described below. | ||||
| 
 | ||||
| To enable the Django integration, add the application to your installed | ||||
| apps, as follows:: | ||||
| 
 | ||||
|     INSTALLED_APPS = [ | ||||
|         # your Django apps... | ||||
| 
 | ||||
|         # the order is not important | ||||
|         'ddtrace.contrib.django', | ||||
|     ] | ||||
| 
 | ||||
| The configuration for this integration is namespaced under the ``DATADOG_TRACE`` | ||||
| Django setting. For example, your ``settings.py`` may contain:: | ||||
| 
 | ||||
|     DATADOG_TRACE = { | ||||
|         'DEFAULT_SERVICE': 'my-django-app', | ||||
|         'TAGS': {'env': 'production'}, | ||||
|     } | ||||
| 
 | ||||
| If you need access to the Datadog settings, you can:: | ||||
| 
 | ||||
|     from ddtrace.contrib.django.conf import settings | ||||
| 
 | ||||
|     tracer = settings.TRACER | ||||
|     tracer.trace("something") | ||||
|     # your code ... | ||||
| 
 | ||||
| To have Django capture the tracer logs, ensure the ``LOGGING`` variable in | ||||
| ``settings.py`` looks similar to:: | ||||
| 
 | ||||
|     LOGGING = { | ||||
|         'loggers': { | ||||
|             'ddtrace': { | ||||
|                 'handlers': ['console'], | ||||
|                 'level': 'WARNING', | ||||
|             }, | ||||
|         }, | ||||
|     } | ||||
| 
 | ||||
| 
 | ||||
| The available settings are: | ||||
| 
 | ||||
| * ``DEFAULT_SERVICE`` (default: ``'django'``): set the service name used by the | ||||
|   tracer. You should usually update this configuration with a meaningful name. | ||||
| * ``DEFAULT_DATABASE_PREFIX`` (default: ``''``): set a prefix for database service | ||||
|   names, so that a service is reported as, for example, `prefix-defaultdb`. | ||||
| * ``DEFAULT_CACHE_SERVICE`` (default: ``''``): set the Django cache service name used | ||||
|   by the tracer. Change this name if you want Django cache spans reported under a | ||||
|   dedicated cache service. | ||||
| * ``TAGS`` (default: ``{}``): set global tags that should be applied to all | ||||
|   spans. | ||||
| * ``TRACER`` (default: ``ddtrace.tracer``): set the default tracer | ||||
|   instance that is used to trace Django internals. By default the ``ddtrace`` | ||||
|   tracer is used. | ||||
| * ``ENABLED`` (default: ``not django_settings.DEBUG``): defines if the tracer is | ||||
|   enabled or not. If set to ``False``, the code is still instrumented but no spans | ||||
|   are sent to the trace agent. This setting cannot be changed at runtime; | ||||
|   a restart is required. By default the tracer is disabled when in ``DEBUG`` | ||||
|   mode, enabled otherwise. | ||||
| * ``DISTRIBUTED_TRACING`` (default: ``True``): defines if the tracer should | ||||
|   use incoming X-DATADOG-* HTTP headers to extend a trace created remotely. It is | ||||
|   required for distributed tracing if this application is called remotely from another | ||||
|   instrumented application. | ||||
|   We suggest enabling it only for internal services where headers are under your control. | ||||
| * ``ANALYTICS_ENABLED`` (default: ``None``): enables APM events in Trace Search & Analytics. | ||||
| * ``AGENT_HOSTNAME`` (default: ``localhost``): define the hostname of the trace agent. | ||||
| * ``AGENT_PORT`` (default: ``8126``): define the port of the trace agent. | ||||
| * ``AUTO_INSTRUMENT`` (default: ``True``): if set to ``False`` the code will not be | ||||
|   instrumented (even if ``INSTRUMENT_DATABASE``, ``INSTRUMENT_CACHE`` or | ||||
|   ``INSTRUMENT_TEMPLATE`` are set to ``True``), while the tracer may still be active | ||||
|   for your internal usage. This can be useful if you want to use the Django | ||||
|   integration but only trace particular functions or views. If set | ||||
|   to ``False``, the request middleware will be disabled even if present. | ||||
| * ``INSTRUMENT_DATABASE`` (default: ``True``): if set to ``False`` the database will | ||||
|   not be instrumented. Only configurable when ``AUTO_INSTRUMENT`` is set to ``True``. | ||||
| * ``INSTRUMENT_CACHE`` (default: ``True``): if set to ``False`` the cache will not | ||||
|   be instrumented. Only configurable when ``AUTO_INSTRUMENT`` is set to ``True``. | ||||
| * ``INSTRUMENT_TEMPLATE`` (default: ``True``): if set to ``False`` template | ||||
|   rendering will not be instrumented. Only configurable when ``AUTO_INSTRUMENT`` | ||||
|   is set to ``True``. | ||||
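| 
 | ||||
| A fuller example combining several of the settings above (all values here | ||||
| are illustrative):: | ||||
| 
 | ||||
|     DATADOG_TRACE = { | ||||
|         'DEFAULT_SERVICE': 'my-django-app', | ||||
|         'AGENT_HOSTNAME': 'dd-agent.internal', | ||||
|         'AGENT_PORT': 8126, | ||||
|         'DISTRIBUTED_TRACING': True, | ||||
|         'INSTRUMENT_TEMPLATE': False, | ||||
|         'TAGS': {'env': 'production'}, | ||||
|     } | ||||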
| """ | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| 
 | ||||
| required_modules = ['django'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .middleware import TraceMiddleware, TraceExceptionMiddleware | ||||
|         from .patch import patch | ||||
|         __all__ = ['TraceMiddleware', 'TraceExceptionMiddleware', 'patch'] | ||||
| 
 | ||||
| 
 | ||||
| # define the Django app configuration | ||||
| default_app_config = 'ddtrace.contrib.django.apps.TracerConfig' | ||||
|  | @ -0,0 +1,19 @@ | |||
| # 3rd party | ||||
| from django.apps import AppConfig, apps | ||||
| 
 | ||||
| # project | ||||
| from .patch import apply_django_patches | ||||
| 
 | ||||
| 
 | ||||
| class TracerConfig(AppConfig): | ||||
|     name = 'ddtrace.contrib.django' | ||||
|     label = 'datadog_django' | ||||
| 
 | ||||
|     def ready(self): | ||||
|         """ | ||||
|         Ready is called as soon as the registry is fully populated. | ||||
|         Tracing capabilities must be enabled in this function so that | ||||
|         all Django internals are properly configured. | ||||
|         """ | ||||
|         rest_framework_is_installed = apps.is_installed('rest_framework') | ||||
|         apply_django_patches(patch_rest_framework=rest_framework_is_installed) | ||||
|  | @ -0,0 +1,111 @@ | |||
| from functools import wraps | ||||
| 
 | ||||
| from django.conf import settings as django_settings | ||||
| 
 | ||||
| from ...ext import SpanTypes | ||||
| from ...internal.logger import get_logger | ||||
| from .conf import settings, import_from_string | ||||
| from .utils import quantize_key_values, _resource_from_cache_prefix | ||||
| 
 | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| # code instrumentation | ||||
| DATADOG_NAMESPACE = '__datadog_original_{method}' | ||||
| TRACED_METHODS = [ | ||||
|     'get', | ||||
|     'set', | ||||
|     'add', | ||||
|     'delete', | ||||
|     'incr', | ||||
|     'decr', | ||||
|     'get_many', | ||||
|     'set_many', | ||||
|     'delete_many', | ||||
| ] | ||||
| 
 | ||||
| # standard tags | ||||
| CACHE_BACKEND = 'django.cache.backend' | ||||
| CACHE_COMMAND_KEY = 'django.cache.key' | ||||
| 
 | ||||
| 
 | ||||
| def patch_cache(tracer): | ||||
|     """ | ||||
|     Function that patches the inner cache system. Because the cache backend | ||||
|     can have different implementations and connectors, this function must | ||||
|     handle all possible interactions with the Django cache. What follows | ||||
|     is currently traced: | ||||
| 
 | ||||
|     * in-memory cache | ||||
|     * the cache client wrapper that could use any of the common | ||||
|       Django supported cache servers (Redis, Memcached, Database, Custom) | ||||
|     """ | ||||
|     # discover used cache backends | ||||
|     cache_backends = set([cache['BACKEND'] for cache in django_settings.CACHES.values()]) | ||||
| 
 | ||||
|     def _trace_operation(fn, method_name): | ||||
|         """ | ||||
|         Return a wrapped function that traces a cache operation | ||||
|         """ | ||||
|         cache_service_name = settings.DEFAULT_CACHE_SERVICE \ | ||||
|             if settings.DEFAULT_CACHE_SERVICE else settings.DEFAULT_SERVICE | ||||
| 
 | ||||
|         @wraps(fn) | ||||
|         def wrapped(self, *args, **kwargs): | ||||
|             # get the original function method | ||||
|             method = getattr(self, DATADOG_NAMESPACE.format(method=method_name)) | ||||
|             with tracer.trace('django.cache', span_type=SpanTypes.CACHE, service=cache_service_name) as span: | ||||
|                 # update the resource name and tag the cache backend | ||||
|                 span.resource = _resource_from_cache_prefix(method_name, self) | ||||
|                 cache_backend = '{}.{}'.format(self.__module__, self.__class__.__name__) | ||||
|                 span.set_tag(CACHE_BACKEND, cache_backend) | ||||
| 
 | ||||
|                 if args: | ||||
|                     keys = quantize_key_values(args[0]) | ||||
|                     span.set_tag(CACHE_COMMAND_KEY, keys) | ||||
| 
 | ||||
|                 return method(*args, **kwargs) | ||||
|         return wrapped | ||||
| 
 | ||||
|     def _wrap_method(cls, method_name): | ||||
|         """ | ||||
|         For the given class, wraps the method name with a traced operation | ||||
|         so that the original method is executed while the span is properly | ||||
|         created. | ||||
|         """ | ||||
|         # check if the backend owns the given bound method | ||||
|         if not hasattr(cls, method_name): | ||||
|             return | ||||
| 
 | ||||
|         # prevent patching each backend's method more than once | ||||
|         if hasattr(cls, DATADOG_NAMESPACE.format(method=method_name)): | ||||
|             log.debug('%s already traced', method_name) | ||||
|         else: | ||||
|             method = getattr(cls, method_name) | ||||
|             setattr(cls, DATADOG_NAMESPACE.format(method=method_name), method) | ||||
|             setattr(cls, method_name, _trace_operation(method, method_name)) | ||||
| 
 | ||||
|     # trace all backends | ||||
|     for cache_module in cache_backends: | ||||
|         cache = import_from_string(cache_module, cache_module) | ||||
| 
 | ||||
|         for method in TRACED_METHODS: | ||||
|             _wrap_method(cache, method) | ||||
| 
 | ||||
| 
 | ||||
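| # Illustrative effect (assuming the default locmem backend): after | ||||
| # patch_cache(tracer), a call like cache.get('user:1') produces a | ||||
| # 'django.cache' span with resource 'get', tag django.cache.key = 'user:1' | ||||
| # and tag django.cache.backend = | ||||
| # 'django.core.cache.backends.locmem.LocMemCache'. | ||||
| 
 | ||||
| 
 | ||||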
| def unpatch_method(cls, method_name): | ||||
|     method = getattr(cls, DATADOG_NAMESPACE.format(method=method_name), None) | ||||
|     if method is None: | ||||
|         log.debug('nothing to do, the class is not patched') | ||||
|         return | ||||
|     setattr(cls, method_name, method) | ||||
|     delattr(cls, DATADOG_NAMESPACE.format(method=method_name)) | ||||
| 
 | ||||
| 
 | ||||
| def unpatch_cache(): | ||||
|     cache_backends = set([cache['BACKEND'] for cache in django_settings.CACHES.values()]) | ||||
|     for cache_module in cache_backends: | ||||
|         cache = import_from_string(cache_module, cache_module) | ||||
| 
 | ||||
|         for method in TRACED_METHODS: | ||||
|             unpatch_method(cache, method) | ||||
|  | @ -0,0 +1,27 @@ | |||
| import django | ||||
| 
 | ||||
| 
 | ||||
| if django.VERSION >= (1, 10, 1): | ||||
|     from django.urls import get_resolver | ||||
| 
 | ||||
|     def user_is_authenticated(user): | ||||
|         # Explicit comparison due to the following bug | ||||
|         # https://code.djangoproject.com/ticket/26988 | ||||
|         return user.is_authenticated == True  # noqa E712 | ||||
| else: | ||||
|     from django.conf import settings | ||||
|     from django.core import urlresolvers | ||||
| 
 | ||||
|     def user_is_authenticated(user): | ||||
|         return user.is_authenticated() | ||||
| 
 | ||||
|     if django.VERSION >= (1, 9, 0): | ||||
|         def get_resolver(urlconf=None): | ||||
|             urlconf = urlconf or settings.ROOT_URLCONF | ||||
|             urlresolvers.set_urlconf(urlconf) | ||||
|             return urlresolvers.get_resolver(urlconf) | ||||
|     else: | ||||
|         def get_resolver(urlconf=None): | ||||
|             urlconf = urlconf or settings.ROOT_URLCONF | ||||
|             urlresolvers.set_urlconf(urlconf) | ||||
|             return urlresolvers.RegexURLResolver(r'^/', urlconf) | ||||
|  | @ -0,0 +1,163 @@ | |||
| """ | ||||
| Settings for Datadog tracer are all namespaced in the DATADOG_TRACE setting. | ||||
| For example your project's `settings.py` file might look like this:: | ||||
| 
 | ||||
|     DATADOG_TRACE = { | ||||
|         'TRACER': 'myapp.tracer', | ||||
|     } | ||||
| 
 | ||||
| This module provides the `settings` object, which is used to access | ||||
| Datadog settings, checking for user settings first, then falling | ||||
| back to the defaults. | ||||
| """ | ||||
| from __future__ import unicode_literals | ||||
| 
 | ||||
| import os | ||||
| import importlib | ||||
| 
 | ||||
| from django.conf import settings as django_settings | ||||
| 
 | ||||
| from ...internal.logger import get_logger | ||||
| 
 | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| # List of available settings with their defaults | ||||
| DEFAULTS = { | ||||
|     'AGENT_HOSTNAME': 'localhost', | ||||
|     'AGENT_PORT': 8126, | ||||
|     'AUTO_INSTRUMENT': True, | ||||
|     'INSTRUMENT_CACHE': True, | ||||
|     'INSTRUMENT_DATABASE': True, | ||||
|     'INSTRUMENT_TEMPLATE': True, | ||||
|     'DEFAULT_DATABASE_PREFIX': '', | ||||
|     'DEFAULT_SERVICE': 'django', | ||||
|     'DEFAULT_CACHE_SERVICE': '', | ||||
|     'ENABLED': True, | ||||
|     'DISTRIBUTED_TRACING': True, | ||||
|     'ANALYTICS_ENABLED': None, | ||||
|     'ANALYTICS_SAMPLE_RATE': True, | ||||
|     'TRACE_QUERY_STRING': None, | ||||
|     'TAGS': {}, | ||||
|     'TRACER': 'ddtrace.tracer', | ||||
| } | ||||
| 
 | ||||
| # List of settings that may be in string import notation. | ||||
| IMPORT_STRINGS = ( | ||||
|     'TRACER', | ||||
| ) | ||||
| 
 | ||||
| # List of settings that have been removed | ||||
| REMOVED_SETTINGS = () | ||||
| 
 | ||||
| 
 | ||||
| def import_from_string(val, setting_name): | ||||
|     """ | ||||
|     Attempt to import a class from a string representation. | ||||
|     """ | ||||
|     try: | ||||
|         # Nod to tastypie's use of importlib. | ||||
|         parts = val.split('.') | ||||
|         module_path, class_name = '.'.join(parts[:-1]), parts[-1] | ||||
|         module = importlib.import_module(module_path) | ||||
|         return getattr(module, class_name) | ||||
|     except (ImportError, AttributeError) as e: | ||||
|         msg = 'Could not import "{}" for setting "{}". {}: {}.'.format( | ||||
|             val, | ||||
|             setting_name, | ||||
|             e.__class__.__name__, | ||||
|             e, | ||||
|         ) | ||||
| 
 | ||||
|         raise ImportError(msg) | ||||
| 
 | ||||
| 
 | ||||
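| # Illustrative resolution (using the defaults above): | ||||
| # import_from_string('ddtrace.tracer', 'TRACER') imports the `ddtrace` module | ||||
| # and returns its `tracer` attribute, the global tracer instance. | ||||
| 
 | ||||
| 
 | ||||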
| class DatadogSettings(object): | ||||
|     """ | ||||
|     A settings object, that allows Datadog settings to be accessed as properties. | ||||
|     For example: | ||||
| 
 | ||||
|         from ddtrace.contrib.django.conf import settings | ||||
| 
 | ||||
|         tracer = settings.TRACER | ||||
| 
 | ||||
|     Any setting with string import paths will be automatically resolved | ||||
|     and return the class, rather than the string literal. | ||||
|     """ | ||||
|     def __init__(self, user_settings=None, defaults=None, import_strings=None): | ||||
|         if user_settings: | ||||
|             self._user_settings = self.__check_user_settings(user_settings) | ||||
| 
 | ||||
|         self.defaults = defaults or DEFAULTS | ||||
|         if os.environ.get('DATADOG_ENV'): | ||||
|             self.defaults['TAGS'].update({'env': os.environ.get('DATADOG_ENV')}) | ||||
|         if os.environ.get('DATADOG_SERVICE_NAME'): | ||||
|             self.defaults['DEFAULT_SERVICE'] = os.environ.get('DATADOG_SERVICE_NAME') | ||||
| 
 | ||||
|         host = os.environ.get('DD_AGENT_HOST', os.environ.get('DATADOG_TRACE_AGENT_HOSTNAME')) | ||||
|         if host: | ||||
|             self.defaults['AGENT_HOSTNAME'] = host | ||||
| 
 | ||||
|         port = os.environ.get('DD_TRACE_AGENT_PORT', os.environ.get('DATADOG_TRACE_AGENT_PORT')) | ||||
|         if port: | ||||
|             # if the agent port is a string, the underlying library that creates the socket | ||||
|             # stops working | ||||
|             try: | ||||
|                 port = int(port) | ||||
|             except ValueError: | ||||
|                 log.warning('DD_TRACE_AGENT_PORT is not an integer value; defaulting to 8126') | ||||
|             else: | ||||
|                 self.defaults['AGENT_PORT'] = port | ||||
| 
 | ||||
|         self.import_strings = import_strings or IMPORT_STRINGS | ||||
| 
 | ||||
|     @property | ||||
|     def user_settings(self): | ||||
|         if not hasattr(self, '_user_settings'): | ||||
|             self._user_settings = getattr(django_settings, 'DATADOG_TRACE', {}) | ||||
| 
 | ||||
|         # TODO[manu]: prevents docs import errors; provide a better implementation | ||||
|         if 'ENABLED' not in self._user_settings: | ||||
|             self._user_settings['ENABLED'] = not django_settings.DEBUG | ||||
|         return self._user_settings | ||||
| 
 | ||||
|     def __getattr__(self, attr): | ||||
|         if attr not in self.defaults: | ||||
|             raise AttributeError('Invalid setting: "{}"'.format(attr)) | ||||
| 
 | ||||
|         try: | ||||
|             # Check if present in user settings | ||||
|             val = self.user_settings[attr] | ||||
|         except KeyError: | ||||
|             # Otherwise, fall back to defaults | ||||
|             val = self.defaults[attr] | ||||
| 
 | ||||
|         # Coerce import strings into classes | ||||
|         if attr in self.import_strings: | ||||
|             val = import_from_string(val, attr) | ||||
| 
 | ||||
|         # Cache the result | ||||
|         setattr(self, attr, val) | ||||
|         return val | ||||
| 
 | ||||
|     def __check_user_settings(self, user_settings): | ||||
|         SETTINGS_DOC = 'http://pypi.datadoghq.com/trace/docs/#module-ddtrace.contrib.django' | ||||
|         for setting in REMOVED_SETTINGS: | ||||
|             if setting in user_settings: | ||||
|                 raise RuntimeError( | ||||
|                     'The "{}" setting has been removed, check "{}".'.format(setting, SETTINGS_DOC) | ||||
|                 ) | ||||
|         return user_settings | ||||
| 
 | ||||
| 
 | ||||
| settings = DatadogSettings(None, DEFAULTS, IMPORT_STRINGS) | ||||
| 
 | ||||
| 
 | ||||
| def reload_settings(*args, **kwargs): | ||||
|     """ | ||||
|     Triggers a reload when Django emits the reloading signal | ||||
|     """ | ||||
|     global settings | ||||
|     setting, value = kwargs['setting'], kwargs['value'] | ||||
|     if setting == 'DATADOG_TRACE': | ||||
|         settings = DatadogSettings(value, DEFAULTS, IMPORT_STRINGS) | ||||
|  | @ -0,0 +1,76 @@ | |||
| from django.db import connections | ||||
| 
 | ||||
| # project | ||||
| from ...ext import sql as sqlx | ||||
| from ...internal.logger import get_logger | ||||
| from ...pin import Pin | ||||
| 
 | ||||
| from .conf import settings | ||||
| from ..dbapi import TracedCursor as DbApiTracedCursor | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| CURSOR_ATTR = '_datadog_original_cursor' | ||||
| ALL_CONNS_ATTR = '_datadog_original_connections_all' | ||||
| 
 | ||||
| 
 | ||||
| def patch_db(tracer): | ||||
|     if hasattr(connections, ALL_CONNS_ATTR): | ||||
|         log.debug('db already patched') | ||||
|         return | ||||
|     setattr(connections, ALL_CONNS_ATTR, connections.all) | ||||
| 
 | ||||
|     def all_connections(self): | ||||
|         conns = getattr(self, ALL_CONNS_ATTR)() | ||||
|         for conn in conns: | ||||
|             patch_conn(tracer, conn) | ||||
|         return conns | ||||
| 
 | ||||
|     connections.all = all_connections.__get__(connections, type(connections)) | ||||
| 
 | ||||
| 
 | ||||
| def unpatch_db(): | ||||
|     for c in connections.all(): | ||||
|         unpatch_conn(c) | ||||
| 
 | ||||
|     all_connections = getattr(connections, ALL_CONNS_ATTR, None) | ||||
|     if all_connections is None: | ||||
|         log.debug('nothing to do, the db is not patched') | ||||
|         return | ||||
|     connections.all = all_connections | ||||
|     delattr(connections, ALL_CONNS_ATTR) | ||||
| 
 | ||||
| 
 | ||||
| def patch_conn(tracer, conn): | ||||
|     if hasattr(conn, CURSOR_ATTR): | ||||
|         return | ||||
| 
 | ||||
|     setattr(conn, CURSOR_ATTR, conn.cursor) | ||||
| 
 | ||||
|     def cursor(): | ||||
|         database_prefix = ( | ||||
|             '{}-'.format(settings.DEFAULT_DATABASE_PREFIX) | ||||
|             if settings.DEFAULT_DATABASE_PREFIX else '' | ||||
|         ) | ||||
|         alias = getattr(conn, 'alias', 'default') | ||||
|         service = '{}{}{}'.format(database_prefix, alias, 'db') | ||||
|         vendor = getattr(conn, 'vendor', 'db') | ||||
|         prefix = sqlx.normalize_vendor(vendor) | ||||
|         tags = { | ||||
|             'django.db.vendor': vendor, | ||||
|             'django.db.alias': alias, | ||||
|         } | ||||
| 
 | ||||
|         pin = Pin(service, tags=tags, tracer=tracer, app=prefix) | ||||
|         return DbApiTracedCursor(conn._datadog_original_cursor(), pin) | ||||
| 
 | ||||
|     conn.cursor = cursor | ||||
| 
 | ||||
| 
 | ||||
| def unpatch_conn(conn): | ||||
|     cursor = getattr(conn, CURSOR_ATTR, None) | ||||
|     if cursor is None: | ||||
|         log.debug('nothing to do, the connection is not patched') | ||||
|         return | ||||
|     conn.cursor = cursor | ||||
|     delattr(conn, CURSOR_ATTR) | ||||
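| 
 | ||||
| 
 | ||||
| # Illustrative service naming (from the cursor() closure above): with | ||||
| # DEFAULT_DATABASE_PREFIX = 'acme' and the default connection alias, spans are | ||||
| # reported under the service 'acme-defaultdb'; with no prefix, 'defaultdb'. | ||||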
|  | @ -0,0 +1,230 @@ | |||
| # project | ||||
| from .conf import settings | ||||
| from .compat import user_is_authenticated, get_resolver | ||||
| from .utils import get_request_uri | ||||
| 
 | ||||
| from ...constants import ANALYTICS_SAMPLE_RATE_KEY | ||||
| from ...contrib import func_name | ||||
| from ...ext import SpanTypes, http | ||||
| from ...internal.logger import get_logger | ||||
| from ...propagation.http import HTTPPropagator | ||||
| from ...settings import config | ||||
| 
 | ||||
| # 3p | ||||
| from django.core.exceptions import MiddlewareNotUsed | ||||
| from django.conf import settings as django_settings | ||||
| import django | ||||
| 
 | ||||
| try: | ||||
|     from django.utils.deprecation import MiddlewareMixin | ||||
| 
 | ||||
|     MiddlewareClass = MiddlewareMixin | ||||
| except ImportError: | ||||
|     MiddlewareClass = object | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| EXCEPTION_MIDDLEWARE = "ddtrace.contrib.django.TraceExceptionMiddleware" | ||||
| TRACE_MIDDLEWARE = "ddtrace.contrib.django.TraceMiddleware" | ||||
| MIDDLEWARE = "MIDDLEWARE" | ||||
| MIDDLEWARE_CLASSES = "MIDDLEWARE_CLASSES" | ||||
| 
 | ||||
| # Default views list available from: | ||||
| #   https://github.com/django/django/blob/38e2fdadfd9952e751deed662edf4c496d238f28/django/views/defaults.py | ||||
| # DEV: Django doesn't call `process_view` when falling back to one of these internal error handling views | ||||
| # DEV: We only use these names when `span.resource == 'unknown'` and we have one of these status codes | ||||
| _django_default_views = { | ||||
|     400: "django.views.defaults.bad_request", | ||||
|     403: "django.views.defaults.permission_denied", | ||||
|     404: "django.views.defaults.page_not_found", | ||||
|     500: "django.views.defaults.server_error", | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| def _analytics_enabled(): | ||||
|     return ( | ||||
|         (config.analytics_enabled and settings.ANALYTICS_ENABLED is not False) or settings.ANALYTICS_ENABLED is True | ||||
|     ) and settings.ANALYTICS_SAMPLE_RATE is not None | ||||
| 
 | ||||
| 
 | ||||
| def get_middleware_insertion_point(): | ||||
|     """Returns the attribute name and collection object for the Django middleware. | ||||
| 
 | ||||
|     If middleware cannot be found, returns None for the middleware collection. | ||||
|     """ | ||||
|     middleware = getattr(django_settings, MIDDLEWARE, None) | ||||
|     # Prioritise MIDDLEWARE over ..._CLASSES, but only in 1.10 and later. | ||||
|     if middleware is not None and django.VERSION >= (1, 10): | ||||
|         return MIDDLEWARE, middleware | ||||
|     return MIDDLEWARE_CLASSES, getattr(django_settings, MIDDLEWARE_CLASSES, None) | ||||
| 
 | ||||
| 
 | ||||
| def insert_trace_middleware(): | ||||
|     middleware_attribute, middleware = get_middleware_insertion_point() | ||||
|     if middleware is not None and TRACE_MIDDLEWARE not in set(middleware): | ||||
|         setattr(django_settings, middleware_attribute, type(middleware)((TRACE_MIDDLEWARE,)) + middleware) | ||||
| 
 | ||||
| 
 | ||||
| def remove_trace_middleware(): | ||||
|     _, middleware = get_middleware_insertion_point() | ||||
|     if middleware and TRACE_MIDDLEWARE in set(middleware): | ||||
|         middleware.remove(TRACE_MIDDLEWARE) | ||||
| 
 | ||||
| 
 | ||||
| def insert_exception_middleware(): | ||||
|     middleware_attribute, middleware = get_middleware_insertion_point() | ||||
|     if middleware is not None and EXCEPTION_MIDDLEWARE not in set(middleware): | ||||
|         setattr(django_settings, middleware_attribute, middleware + type(middleware)((EXCEPTION_MIDDLEWARE,))) | ||||
| 
 | ||||
| 
 | ||||
| def remove_exception_middleware(): | ||||
|     _, middleware = get_middleware_insertion_point() | ||||
|     if middleware and EXCEPTION_MIDDLEWARE in set(middleware): | ||||
|         middleware.remove(EXCEPTION_MIDDLEWARE) | ||||
| 
 | ||||
| 
 | ||||
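| # Illustrative result (assuming a typical MIDDLEWARE list): after the two | ||||
| # insert_* helpers above run, the list is bracketed by the tracing middlewares: | ||||
| # | ||||
| #   ['ddtrace.contrib.django.TraceMiddleware', | ||||
| #    'django.middleware.common.CommonMiddleware', | ||||
| #    ..., | ||||
| #    'ddtrace.contrib.django.TraceExceptionMiddleware'] | ||||
| 
 | ||||
| 
 | ||||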
| class InstrumentationMixin(MiddlewareClass): | ||||
|     """ | ||||
|     Useful mixin base class for tracing middlewares | ||||
|     """ | ||||
| 
 | ||||
|     def __init__(self, get_response=None): | ||||
|         # disable the middleware if the tracer is not enabled | ||||
|         # or if the auto instrumentation is disabled | ||||
|         self.get_response = get_response | ||||
|         if not settings.AUTO_INSTRUMENT: | ||||
|             raise MiddlewareNotUsed | ||||
| 
 | ||||
| 
 | ||||
| class TraceExceptionMiddleware(InstrumentationMixin): | ||||
|     """ | ||||
|     Middleware that traces exceptions raised | ||||
|     """ | ||||
| 
 | ||||
|     def process_exception(self, request, exception): | ||||
|         try: | ||||
|             span = _get_req_span(request) | ||||
|             if span: | ||||
|                 span.set_tag(http.STATUS_CODE, "500") | ||||
|                 span.set_traceback()  # will set the exception info | ||||
|         except Exception: | ||||
|             log.debug("error processing exception", exc_info=True) | ||||
| 
 | ||||
| 
 | ||||
| class TraceMiddleware(InstrumentationMixin): | ||||
|     """ | ||||
|     Middleware that traces Django requests | ||||
|     """ | ||||
| 
 | ||||
|     def process_request(self, request): | ||||
|         tracer = settings.TRACER | ||||
|         if settings.DISTRIBUTED_TRACING: | ||||
|             propagator = HTTPPropagator() | ||||
|             context = propagator.extract(request.META) | ||||
|             # Only need to activate the new context if something was propagated | ||||
|             if context.trace_id: | ||||
|                 tracer.context_provider.activate(context) | ||||
|         try: | ||||
|             span = tracer.trace( | ||||
|                 "django.request", | ||||
|                 service=settings.DEFAULT_SERVICE, | ||||
|                 resource="unknown",  # will be filled by process view | ||||
|                 span_type=SpanTypes.WEB, | ||||
|             ) | ||||
| 
 | ||||
|             # set analytics sample rate | ||||
|             # DEV: Django is a special case; it maintains configuration separate from the config API | ||||
|             if _analytics_enabled() and settings.ANALYTICS_SAMPLE_RATE is not None: | ||||
|                 span.set_tag( | ||||
|                     ANALYTICS_SAMPLE_RATE_KEY, settings.ANALYTICS_SAMPLE_RATE, | ||||
|                 ) | ||||
| 
 | ||||
|             # Set HTTP Request tags | ||||
|             span.set_tag(http.METHOD, request.method) | ||||
|             span.set_tag(http.URL, get_request_uri(request)) | ||||
|             trace_query_string = settings.TRACE_QUERY_STRING | ||||
|             if trace_query_string is None: | ||||
|                 trace_query_string = config.django.trace_query_string | ||||
|             if trace_query_string: | ||||
|                 span.set_tag(http.QUERY_STRING, request.META["QUERY_STRING"]) | ||||
|             _set_req_span(request, span) | ||||
|         except Exception: | ||||
|             log.debug("error tracing request", exc_info=True) | ||||
| 
 | ||||
|     def process_view(self, request, view_func, *args, **kwargs): | ||||
|         span = _get_req_span(request) | ||||
|         if span: | ||||
|             span.resource = func_name(view_func) | ||||
| 
 | ||||
|     def process_response(self, request, response): | ||||
|         try: | ||||
|             span = _get_req_span(request) | ||||
|             if span: | ||||
|                 if response.status_code < 500 and span.error: | ||||
|                     # remove any existing stack trace since it must have been | ||||
|                     # handled appropriately | ||||
|                     span._remove_exc_info() | ||||
| 
 | ||||
|                 # If `process_view` was not called, try to determine the correct `span.resource` to set | ||||
|                 # DEV: `process_view` won't get called if a middle `process_request` returns an HttpResponse | ||||
|                 # DEV: `process_view` won't get called when internal error handlers are used (e.g. for 404 responses) | ||||
|                 if span.resource == "unknown": | ||||
|                     try: | ||||
|                         # Attempt to lookup the view function from the url resolver | ||||
|                         #   https://github.com/django/django/blob/38e2fdadfd9952e751deed662edf4c496d238f28/django/core/handlers/base.py#L104-L113  # noqa | ||||
|                         urlconf = None | ||||
|                         if hasattr(request, "urlconf"): | ||||
|                             urlconf = request.urlconf | ||||
|                         resolver = get_resolver(urlconf) | ||||
| 
 | ||||
|                         # Try to resolve the Django view for handling this request | ||||
|                         if getattr(request, "request_match", None): | ||||
|                             request_match = request.request_match | ||||
|                         else: | ||||
|                             # This may raise a `django.urls.exceptions.Resolver404` exception | ||||
|                             request_match = resolver.resolve(request.path_info) | ||||
|                         span.resource = func_name(request_match.func) | ||||
|                     except Exception: | ||||
|                         log.debug("error determining request view function", exc_info=True) | ||||
| 
 | ||||
|                         # If the view could not be found, try to set from a static list of | ||||
|                         # known internal error handler views | ||||
|                         span.resource = _django_default_views.get(response.status_code, "unknown") | ||||
| 
 | ||||
|                 span.set_tag(http.STATUS_CODE, response.status_code) | ||||
|                 span = _set_auth_tags(span, request) | ||||
|                 span.finish() | ||||
|         except Exception: | ||||
|             log.debug("error tracing request", exc_info=True) | ||||
|         finally: | ||||
|             return response | ||||
| 
 | ||||
| 
 | ||||
| def _get_req_span(request): | ||||
|     """ Return the datadog span from the given request. """ | ||||
|     return getattr(request, "_datadog_request_span", None) | ||||
| 
 | ||||
| 
 | ||||
| def _set_req_span(request, span): | ||||
|     """ Set the datadog span on the given request. """ | ||||
|     return setattr(request, "_datadog_request_span", span) | ||||
| 
 | ||||
| 
 | ||||
| def _set_auth_tags(span, request): | ||||
|     """ Patch any available auth tags from the request onto the span. """ | ||||
|     user = getattr(request, "user", None) | ||||
|     if not user: | ||||
|         return span | ||||
| 
 | ||||
|     if hasattr(user, "is_authenticated"): | ||||
|         span.set_tag("django.user.is_authenticated", user_is_authenticated(user)) | ||||
| 
 | ||||
|     uid = getattr(user, "pk", None) | ||||
|     if uid: | ||||
|         span.set_tag("django.user.id", uid) | ||||
| 
 | ||||
|     uname = getattr(user, "username", None) | ||||
|     if uname: | ||||
|         span.set_tag("django.user.name", uname) | ||||
| 
 | ||||
|     return span | ||||
|  | @ -0,0 +1,94 @@ | |||
| # 3rd party | ||||
| from ddtrace.vendor import wrapt | ||||
| import django | ||||
| from django.db import connections | ||||
| 
 | ||||
| # project | ||||
| from .db import patch_db | ||||
| from .conf import settings | ||||
| from .cache import patch_cache | ||||
| from .templates import patch_template | ||||
| from .middleware import insert_exception_middleware, insert_trace_middleware | ||||
| 
 | ||||
| from ...internal.logger import get_logger | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     """Patch the instrumented methods | ||||
|     """ | ||||
|     if getattr(django, '_datadog_patch', False): | ||||
|         return | ||||
|     setattr(django, '_datadog_patch', True) | ||||
| 
 | ||||
|     _w = wrapt.wrap_function_wrapper | ||||
|     _w('django', 'setup', traced_setup) | ||||
| 
 | ||||
| 
 | ||||
| def traced_setup(wrapped, instance, args, kwargs): | ||||
|     from django.conf import settings | ||||
| 
 | ||||
|     if 'ddtrace.contrib.django' not in settings.INSTALLED_APPS: | ||||
|         if isinstance(settings.INSTALLED_APPS, tuple): | ||||
|             # INSTALLED_APPS is a tuple in Django < 1.9 | ||||
|             settings.INSTALLED_APPS = settings.INSTALLED_APPS + ('ddtrace.contrib.django', ) | ||||
|         else: | ||||
|             settings.INSTALLED_APPS.append('ddtrace.contrib.django') | ||||
| 
 | ||||
|     wrapped(*args, **kwargs) | ||||
| 
 | ||||
| 
 | ||||
| def apply_django_patches(patch_rest_framework): | ||||
|     """ | ||||
|     Ready is called as soon as the registry is fully populated. | ||||
|     So that all Django internals are properly configured, this | ||||
|     must be called after the app has finished starting. | ||||
|     """ | ||||
|     tracer = settings.TRACER | ||||
| 
 | ||||
|     if settings.TAGS: | ||||
|         tracer.set_tags(settings.TAGS) | ||||
| 
 | ||||
|     # configure the tracer instance | ||||
|     # TODO[manu]: we may use configure() but because it creates a new | ||||
|     # AgentWriter, it breaks all tests. The configure() behavior must | ||||
|     # be changed before we can use it in this integration | ||||
|     tracer.enabled = settings.ENABLED | ||||
|     tracer.writer.api.hostname = settings.AGENT_HOSTNAME | ||||
|     tracer.writer.api.port = settings.AGENT_PORT | ||||
| 
 | ||||
|     if settings.AUTO_INSTRUMENT: | ||||
|         # trace Django internals | ||||
|         insert_trace_middleware() | ||||
|         insert_exception_middleware() | ||||
| 
 | ||||
|         if settings.INSTRUMENT_TEMPLATE: | ||||
|             try: | ||||
|                 patch_template(tracer) | ||||
|             except Exception: | ||||
|                 log.exception('error patching Django template rendering') | ||||
| 
 | ||||
|         if settings.INSTRUMENT_DATABASE: | ||||
|             try: | ||||
|                 patch_db(tracer) | ||||
|                 # This is the trigger to patch individual connections. | ||||
|                 # By patching these here, all processes including | ||||
|                 # management commands are also traced. | ||||
|                 connections.all() | ||||
|             except Exception: | ||||
|                 log.exception('error patching Django database connections') | ||||
| 
 | ||||
|         if settings.INSTRUMENT_CACHE: | ||||
|             try: | ||||
|                 patch_cache(tracer) | ||||
|             except Exception: | ||||
|                 log.exception('error patching Django cache') | ||||
| 
 | ||||
|         # Instrument rest_framework app to trace custom exception handling. | ||||
|         if patch_rest_framework: | ||||
|             try: | ||||
|                 from .restframework import patch_restframework | ||||
|                 patch_restframework(tracer) | ||||
|             except Exception: | ||||
|                 log.exception('error patching rest_framework app') | ||||
|  | @ -0,0 +1,42 @@ | |||
| from ddtrace.vendor.wrapt import wrap_function_wrapper as wrap | ||||
| 
 | ||||
| from rest_framework.views import APIView | ||||
| 
 | ||||
| from ...utils.wrappers import unwrap | ||||
| 
 | ||||
| 
 | ||||
| def patch_restframework(tracer): | ||||
|     """ Patches rest_framework app. | ||||
| 
 | ||||
|     To trace exceptions occurring during view processing we currently use a TraceExceptionMiddleware. | ||||
|     However, rest_framework handles exceptions before they reach our middleware. | ||||
|     So we need to manually patch the rest_framework exception handler | ||||
|     to set the exception stack trace in the current span. | ||||
| 
 | ||||
|     """ | ||||
| 
 | ||||
|     def _traced_handle_exception(wrapped, instance, args, kwargs): | ||||
|         """ Sets the error message, error type and exception stack trace to the current span | ||||
|             before calling the original exception handler. | ||||
|         """ | ||||
|         span = tracer.current_span() | ||||
|         if span is not None: | ||||
|             span.set_traceback() | ||||
| 
 | ||||
|         return wrapped(*args, **kwargs) | ||||
| 
 | ||||
|     # do not patch if already patched | ||||
|     if getattr(APIView, '_datadog_patch', False): | ||||
|         return | ||||
|     else: | ||||
|         setattr(APIView, '_datadog_patch', True) | ||||
| 
 | ||||
|     # trace the handle_exception method | ||||
|     wrap('rest_framework.views', 'APIView.handle_exception', _traced_handle_exception) | ||||
| 
 | ||||
| 
 | ||||
| def unpatch_restframework(): | ||||
|     """ Unpatches rest_framework app.""" | ||||
|     if getattr(APIView, '_datadog_patch', False): | ||||
|         setattr(APIView, '_datadog_patch', False) | ||||
|         unwrap(APIView, 'handle_exception') | ||||
|  | @ -0,0 +1,48 @@ | |||
| """ | ||||
| code to measure django template rendering. | ||||
| """ | ||||
| # project | ||||
| from ...ext import SpanTypes | ||||
| from ...internal.logger import get_logger | ||||
| 
 | ||||
| # 3p | ||||
| from django.template import Template | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| RENDER_ATTR = '_datadog_original_render' | ||||
| 
 | ||||
| 
 | ||||
| def patch_template(tracer): | ||||
|     """ will patch django's template rendering function to include timing | ||||
|         and trace information. | ||||
|     """ | ||||
| 
 | ||||
|     # FIXME[matt] we're patching the template class here. ideally we'd only | ||||
|     # patch so we can use multiple tracers at once, but i suspect this is fine | ||||
|     # in practice. | ||||
|     if getattr(Template, RENDER_ATTR, None): | ||||
|         log.debug('already patched') | ||||
|         return | ||||
| 
 | ||||
|     setattr(Template, RENDER_ATTR, Template.render) | ||||
| 
 | ||||
|     def traced_render(self, context): | ||||
|         with tracer.trace('django.template', span_type=SpanTypes.TEMPLATE) as span: | ||||
|             try: | ||||
|                 return Template._datadog_original_render(self, context) | ||||
|             finally: | ||||
|                 template_name = self.name or getattr(context, 'template_name', None) or 'unknown' | ||||
|                 span.resource = template_name | ||||
|                 span.set_tag('django.template_name', template_name) | ||||
| 
 | ||||
|     Template.render = traced_render | ||||
| 
 | ||||
| 
 | ||||
| def unpatch_template(): | ||||
|     render = getattr(Template, RENDER_ATTR, None) | ||||
|     if render is None: | ||||
|         log.debug('nothing to do, Template is not patched') | ||||
|         return | ||||
|     Template.render = render | ||||
|     delattr(Template, RENDER_ATTR) | ||||
|  | @ -0,0 +1,75 @@ | |||
| from ...compat import parse | ||||
| from ...internal.logger import get_logger | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| 
 | ||||
| def _resource_from_cache_prefix(resource, cache): | ||||
|     """ | ||||
|     Combine the resource name with the cache prefix (if any) | ||||
|     """ | ||||
|     if getattr(cache, 'key_prefix', None): | ||||
|         name = '{} {}'.format(resource, cache.key_prefix) | ||||
|     else: | ||||
|         name = resource | ||||
| 
 | ||||
|     # enforce lowercase to make the output nicer to read | ||||
|     return name.lower() | ||||
| 
 | ||||
| 
 | ||||
| def quantize_key_values(key): | ||||
|     """ | ||||
|     Used in the Django trace operation method, it ensures that if a dict | ||||
|     with values is used, we remove the values from the span meta | ||||
|     attributes. For example:: | ||||
| 
 | ||||
|         >>> quantize_key_values({'key': 'value'}) | ||||
|         # returns ['key'] | ||||
|     """ | ||||
|     if isinstance(key, dict): | ||||
|         return key.keys() | ||||
| 
 | ||||
|     return key | ||||
| 
 | ||||
| 
 | ||||
| def get_request_uri(request): | ||||
|     """ | ||||
|     Helper to rebuild the original request url | ||||
| 
 | ||||
| Query strings and fragments are not included. | ||||
|     """ | ||||
|     # DEV: We do this instead of `request.build_absolute_uri()` since | ||||
|     #      an exception can get raised, we want to always build a url | ||||
|     #      regardless of any exceptions raised from `request.get_host()` | ||||
|     host = None | ||||
|     try: | ||||
|         host = request.get_host()  # this will include host:port | ||||
|     except Exception: | ||||
|         log.debug('Failed to get Django request host', exc_info=True) | ||||
| 
 | ||||
|     if not host: | ||||
|         try: | ||||
|             # Try to build host how Django would have | ||||
|             # https://github.com/django/django/blob/e8d0d2a5efc8012dcc8bf1809dec065ebde64c81/django/http/request.py#L85-L102 | ||||
|             if 'HTTP_HOST' in request.META: | ||||
|                 host = request.META['HTTP_HOST'] | ||||
|             else: | ||||
|                 host = request.META['SERVER_NAME'] | ||||
|                 port = str(request.META['SERVER_PORT']) | ||||
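|                 # only append the port when it isn't the default for the scheme | ||||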
|                 if port != ('443' if request.is_secure() else '80'): | ||||
|                     host = '{0}:{1}'.format(host, port) | ||||
|         except Exception: | ||||
|             # This really shouldn't ever happen, but let's guard here just in case | ||||
|             log.debug('Failed to build Django request host', exc_info=True) | ||||
|             host = 'unknown' | ||||
| 
 | ||||
|     # Build request url from the information available | ||||
|     # DEV: We are explicitly omitting query strings since they may contain sensitive information | ||||
|     return parse.urlunparse(parse.ParseResult( | ||||
|         scheme=request.scheme, | ||||
|         netloc=host, | ||||
|         path=request.path, | ||||
|         params='', | ||||
|         query='', | ||||
|         fragment='', | ||||
|     )) | ||||
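| 
 | ||||
| 
 | ||||
| # An illustrative sketch (hypothetical values, not part of the original code): | ||||
| # a request for '/api/users?page=2' on host 'example.com:8443' would yield | ||||
| # 'https://example.com:8443/api/users', with the query string and fragment | ||||
| # deliberately omitted. | ||||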
|  | @ -0,0 +1,48 @@ | |||
| """ | ||||
| Instrument dogpile.cache__ to report all cached lookups. | ||||
| 
 | ||||
| This will add spans around the calls to your cache backend (eg. redis, memory, | ||||
| etc). The spans will also include the following tags: | ||||
| 
 | ||||
| - key/keys: The key(s) dogpile passed to your backend. Note that this will be | ||||
|   the output of the region's ``function_key_generator``, but before any key | ||||
|   mangling is applied (ie. the region's ``key_mangler``). | ||||
| - region: Name of the region. | ||||
| - backend: Name of the backend class. | ||||
| - hit: If the key was found in the cache. | ||||
| - expired: If the key is expired. This is only relevant if the key was found. | ||||
| 
 | ||||
| While cache tracing will generally already have keys in tags, some caching | ||||
| setups will not have useful tag values. For example, when you're using | ||||
| consistent hashing with memcached, the key(s) will appear as a mangled hash. | ||||
| :: | ||||
| 
 | ||||
|     # Patch before importing dogpile.cache | ||||
|     from ddtrace import patch | ||||
|     patch(dogpile_cache=True) | ||||
| 
 | ||||
|     from dogpile.cache import make_region | ||||
| 
 | ||||
|     region = make_region().configure( | ||||
|         "dogpile.cache.pylibmc", | ||||
|         expiration_time=3600, | ||||
|         arguments={"url": ["127.0.0.1"]}, | ||||
|     ) | ||||
| 
 | ||||
|     @region.cache_on_arguments() | ||||
|     def hello(name): | ||||
|         # Some complicated, slow calculation | ||||
|         return "Hello, {}".format(name) | ||||
| 
 | ||||
| .. __: https://dogpilecache.sqlalchemy.org/ | ||||
| """ | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| 
 | ||||
| required_modules = ['dogpile.cache'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .patch import patch, unpatch | ||||
| 
 | ||||
|         __all__ = ['patch', 'unpatch'] | ||||
|  | @ -0,0 +1,37 @@ | |||
| import dogpile | ||||
| 
 | ||||
| from ...pin import Pin | ||||
| from ...utils.formats import asbool | ||||
| 
 | ||||
| 
 | ||||
| def _wrap_lock_ctor(func, instance, args, kwargs): | ||||
|     """ | ||||
|     This seems rather odd. But to track hits, we need to patch the wrapped function that | ||||
|     dogpile passes to the region and locks. Unfortunately it's a closure defined inside | ||||
|     the get_or_create* methods themselves, so we can't easily patch those. | ||||
|     """ | ||||
|     func(*args, **kwargs) | ||||
|     ori_backend_fetcher = instance.value_and_created_fn | ||||
| 
 | ||||
|     def wrapped_backend_fetcher(): | ||||
|         pin = Pin.get_from(dogpile.cache) | ||||
|         if not pin or not pin.enabled(): | ||||
|             return ori_backend_fetcher() | ||||
| 
 | ||||
|         hit = False | ||||
|         expired = True | ||||
|         try: | ||||
|             value, createdtime = ori_backend_fetcher() | ||||
|             hit = value is not dogpile.cache.api.NO_VALUE | ||||
|             # dogpile sometimes returns None, but only checks for truthiness. Coalesce | ||||
|             # to minimize APM users' confusion. | ||||
|             expired = instance._is_expired(createdtime) or False | ||||
|             return value, createdtime | ||||
|         finally: | ||||
|             # Keys are checked in random order so the 'final' answer for partial hits | ||||
|             # should really be false (ie. if any are 'negative', then the tag value | ||||
|             # should be). This means ANDing all hit values and ORing all expired values. | ||||
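|             # For example (a sketch): two keys where one is cached and one is | ||||
|             # not yields hit = True AND False = False. | ||||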
|             span = pin.tracer.current_span() | ||||
|             span.set_tag('hit', asbool(span.get_tag('hit') or 'True') and hit) | ||||
|             span.set_tag('expired', asbool(span.get_tag('expired') or 'False') or expired) | ||||
|     instance.value_and_created_fn = wrapped_backend_fetcher | ||||
|  | @ -0,0 +1,37 @@ | |||
| import dogpile | ||||
| 
 | ||||
| from ddtrace.pin import Pin, _DD_PIN_NAME, _DD_PIN_PROXY_NAME | ||||
| from ddtrace.vendor.wrapt import wrap_function_wrapper as _w | ||||
| 
 | ||||
| from .lock import _wrap_lock_ctor | ||||
| from .region import _wrap_get_create, _wrap_get_create_multi | ||||
| 
 | ||||
| _get_or_create = dogpile.cache.region.CacheRegion.get_or_create | ||||
| _get_or_create_multi = dogpile.cache.region.CacheRegion.get_or_create_multi | ||||
| _lock_ctor = dogpile.lock.Lock.__init__ | ||||
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     if getattr(dogpile.cache, '_datadog_patch', False): | ||||
|         return | ||||
|     setattr(dogpile.cache, '_datadog_patch', True) | ||||
| 
 | ||||
|     _w('dogpile.cache.region', 'CacheRegion.get_or_create', _wrap_get_create) | ||||
|     _w('dogpile.cache.region', 'CacheRegion.get_or_create_multi', _wrap_get_create_multi) | ||||
|     _w('dogpile.lock', 'Lock.__init__', _wrap_lock_ctor) | ||||
| 
 | ||||
|     Pin(app='dogpile.cache', service='dogpile.cache').onto(dogpile.cache) | ||||
| 
 | ||||
| 
 | ||||
| def unpatch(): | ||||
|     if not getattr(dogpile.cache, '_datadog_patch', False): | ||||
|         return | ||||
|     setattr(dogpile.cache, '_datadog_patch', False) | ||||
|     # This looks silly but the unwrap util doesn't support class instance methods, even | ||||
|     # though wrapt does. This was causing the patches to stack on top of each other | ||||
|     # during testing. | ||||
|     dogpile.cache.region.CacheRegion.get_or_create = _get_or_create | ||||
|     dogpile.cache.region.CacheRegion.get_or_create_multi = _get_or_create_multi | ||||
|     dogpile.lock.Lock.__init__ = _lock_ctor | ||||
|     setattr(dogpile.cache, _DD_PIN_NAME, None) | ||||
|     setattr(dogpile.cache, _DD_PIN_PROXY_NAME, None) | ||||
|  | @ -0,0 +1,29 @@ | |||
| import dogpile | ||||
| 
 | ||||
| from ...pin import Pin | ||||
| 
 | ||||
| 
 | ||||
| def _wrap_get_create(func, instance, args, kwargs): | ||||
|     pin = Pin.get_from(dogpile.cache) | ||||
|     if not pin or not pin.enabled(): | ||||
|         return func(*args, **kwargs) | ||||
| 
 | ||||
|     key = args[0] | ||||
|     with pin.tracer.trace('dogpile.cache', resource='get_or_create', span_type='cache') as span: | ||||
|         span.set_tag('key', key) | ||||
|         span.set_tag('region', instance.name) | ||||
|         span.set_tag('backend', instance.actual_backend.__class__.__name__) | ||||
|         return func(*args, **kwargs) | ||||
| 
 | ||||
| 
 | ||||
| def _wrap_get_create_multi(func, instance, args, kwargs): | ||||
|     pin = Pin.get_from(dogpile.cache) | ||||
|     if not pin or not pin.enabled(): | ||||
|         return func(*args, **kwargs) | ||||
| 
 | ||||
|     keys = args[0] | ||||
|     with pin.tracer.trace('dogpile.cache', resource='get_or_create_multi', span_type='cache') as span: | ||||
|         span.set_tag('keys', keys) | ||||
|         span.set_tag('region', instance.name) | ||||
|         span.set_tag('backend', instance.actual_backend.__class__.__name__) | ||||
|         return func(*args, **kwargs) | ||||
|  | @ -0,0 +1,33 @@ | |||
| """Instrument Elasticsearch to report Elasticsearch queries. | ||||
| 
 | ||||
| ``patch_all`` will automatically patch your Elasticsearch instance so that requests are traced. | ||||
| :: | ||||
| 
 | ||||
|     from ddtrace import Pin, patch | ||||
|     from elasticsearch import Elasticsearch | ||||
| 
 | ||||
|     # If not patched yet, you can patch elasticsearch specifically | ||||
|     patch(elasticsearch=True) | ||||
| 
 | ||||
|     # This will report spans with the default instrumentation | ||||
|     es = Elasticsearch(port=ELASTICSEARCH_CONFIG['port']) | ||||
|     # Example of instrumented query | ||||
|     es.indices.create(index='books', ignore=400) | ||||
| 
 | ||||
|     # Use a pin to specify metadata related to this client | ||||
|     es = Elasticsearch(port=ELASTICSEARCH_CONFIG['port']) | ||||
|     Pin.override(es.transport, service='elasticsearch-videos') | ||||
|     es.indices.create(index='videos', ignore=400) | ||||
| """ | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| # DEV: We only require one of these modules to be available | ||||
| required_modules = ['elasticsearch', 'elasticsearch1', 'elasticsearch2', 'elasticsearch5', 'elasticsearch6'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     # We were able to find at least one of the required modules | ||||
|     if set(missing_modules) != set(required_modules): | ||||
|         from .transport import get_traced_transport | ||||
|         from .patch import patch | ||||
| 
 | ||||
|         __all__ = ['get_traced_transport', 'patch'] | ||||
|  | @ -0,0 +1,14 @@ | |||
| from importlib import import_module | ||||
| 
 | ||||
| module_names = ('elasticsearch', 'elasticsearch1', 'elasticsearch2', 'elasticsearch5', 'elasticsearch6') | ||||
| for module_name in module_names: | ||||
|     try: | ||||
|         elasticsearch = import_module(module_name) | ||||
|         break | ||||
|     except ImportError: | ||||
|         pass | ||||
| else: | ||||
|     raise ImportError('could not import any of {0!r}'.format(module_names)) | ||||
| 
 | ||||
| 
 | ||||
| __all__ = ['elasticsearch'] | ||||
|  | @ -0,0 +1,118 @@ | |||
| from importlib import import_module | ||||
| 
 | ||||
| from ddtrace.vendor.wrapt import wrap_function_wrapper as _w | ||||
| 
 | ||||
| from .quantize import quantize | ||||
| 
 | ||||
| from ...compat import urlencode | ||||
| from ...constants import ANALYTICS_SAMPLE_RATE_KEY | ||||
| from ...ext import SpanTypes, elasticsearch as metadata, http | ||||
| from ...pin import Pin | ||||
| from ...utils.wrappers import unwrap as _u | ||||
| from ...settings import config | ||||
| 
 | ||||
| 
 | ||||
| def _es_modules(): | ||||
|     module_names = ('elasticsearch', 'elasticsearch1', 'elasticsearch2', 'elasticsearch5', 'elasticsearch6') | ||||
|     for module_name in module_names: | ||||
|         try: | ||||
|             yield import_module(module_name) | ||||
|         except ImportError: | ||||
|             pass | ||||
| 
 | ||||
| 
 | ||||
| # NB: We are patching the default elasticsearch.transport module | ||||
| def patch(): | ||||
|     for elasticsearch in _es_modules(): | ||||
|         _patch(elasticsearch) | ||||
| 
 | ||||
| 
 | ||||
| def _patch(elasticsearch): | ||||
|     if getattr(elasticsearch, '_datadog_patch', False): | ||||
|         return | ||||
|     setattr(elasticsearch, '_datadog_patch', True) | ||||
|     _w(elasticsearch.transport, 'Transport.perform_request', _get_perform_request(elasticsearch)) | ||||
|     Pin(service=metadata.SERVICE, app=metadata.APP).onto(elasticsearch.transport.Transport) | ||||
| 
 | ||||
| 
 | ||||
| def unpatch(): | ||||
|     for elasticsearch in _es_modules(): | ||||
|         _unpatch(elasticsearch) | ||||
| 
 | ||||
| 
 | ||||
| def _unpatch(elasticsearch): | ||||
|     if getattr(elasticsearch, '_datadog_patch', False): | ||||
|         setattr(elasticsearch, '_datadog_patch', False) | ||||
|         _u(elasticsearch.transport.Transport, 'perform_request') | ||||
| 
 | ||||
| 
 | ||||
| def _get_perform_request(elasticsearch): | ||||
|     def _perform_request(func, instance, args, kwargs): | ||||
|         pin = Pin.get_from(instance) | ||||
|         if not pin or not pin.enabled(): | ||||
|             return func(*args, **kwargs) | ||||
| 
 | ||||
|         with pin.tracer.trace('elasticsearch.query', span_type=SpanTypes.ELASTICSEARCH) as span: | ||||
|             # Don't instrument if the trace is not sampled | ||||
|             if not span.sampled: | ||||
|                 return func(*args, **kwargs) | ||||
| 
 | ||||
|             method, url = args | ||||
|             params = kwargs.get('params') | ||||
|             body = kwargs.get('body') | ||||
| 
 | ||||
|             span.service = pin.service | ||||
|             span.set_tag(metadata.METHOD, method) | ||||
|             span.set_tag(metadata.URL, url) | ||||
|             span.set_tag(metadata.PARAMS, urlencode(params)) | ||||
|             if config.elasticsearch.trace_query_string: | ||||
|                 span.set_tag(http.QUERY_STRING, urlencode(params)) | ||||
|             if method == 'GET': | ||||
|                 span.set_tag(metadata.BODY, instance.serializer.dumps(body)) | ||||
|             status = None | ||||
| 
 | ||||
|             # set analytics sample rate | ||||
|             span.set_tag( | ||||
|                 ANALYTICS_SAMPLE_RATE_KEY, | ||||
|                 config.elasticsearch.get_analytics_sample_rate() | ||||
|             ) | ||||
| 
 | ||||
|             span = quantize(span) | ||||
| 
 | ||||
|             try: | ||||
|                 result = func(*args, **kwargs) | ||||
|             except elasticsearch.exceptions.TransportError as e: | ||||
|                 span.set_tag(http.STATUS_CODE, getattr(e, 'status_code', 500)) | ||||
|                 raise | ||||
| 
 | ||||
|             try: | ||||
|                 # Optional metadata extraction with soft fail. | ||||
|                 if isinstance(result, tuple) and len(result) == 2: | ||||
|                     # elasticsearch<2.4; it returns both the status and the body | ||||
|                     status, data = result | ||||
|                 else: | ||||
|                     # elasticsearch>=2.4; internal change for ``Transport.perform_request`` | ||||
|                     # that just returns the body | ||||
|                     data = result | ||||
| 
 | ||||
|                 took = data.get('took') | ||||
|                 if took: | ||||
|                     span.set_metric(metadata.TOOK, int(took)) | ||||
|             except Exception: | ||||
|                 pass | ||||
| 
 | ||||
|             if status: | ||||
|                 span.set_tag(http.STATUS_CODE, status) | ||||
| 
 | ||||
|             return result | ||||
|     return _perform_request | ||||
| 
 | ||||
| 
 | ||||
| # Backwards compatibility for anyone who decided to import `ddtrace.contrib.elasticsearch.patch._perform_request` | ||||
| # DEV: `_perform_request` is a `wrapt.FunctionWrapper` | ||||
| try: | ||||
|     # DEV: Import as `es` to not shadow loop variables above | ||||
|     import elasticsearch as es | ||||
|     _perform_request = _get_perform_request(es) | ||||
| except ImportError: | ||||
|     pass | ||||
|  | @ -0,0 +1,37 @@ | |||
| import re | ||||
| 
 | ||||
| from ...ext import elasticsearch as metadata | ||||
| 
 | ||||
| # Replace any ID | ||||
| ID_REGEXP = re.compile(r'/([0-9]+)([/\?]|$)') | ||||
| ID_PLACEHOLDER = r'/?\2' | ||||
| 
 | ||||
| # Remove digits from potential timestamped indexes (should be an option). | ||||
| # For now, let's say 2+ digits | ||||
| INDEX_REGEXP = re.compile(r'[0-9]{2,}') | ||||
| INDEX_PLACEHOLDER = r'?' | ||||
| 
 | ||||
| 
 | ||||
| def quantize(span): | ||||
|     """Quantize an elasticsearch span | ||||
| 
 | ||||
|     We want to extract a meaningful `resource` from the request. | ||||
|     We do it based on the method + url, with some cleanup applied to the URL. | ||||
| 
 | ||||
|     The URL might contain an ID, and it is also common to have timestamped indexes. | ||||
|     While the first is easy to catch, the second should probably be configurable. | ||||
| 
 | ||||
|     All of this should probably be done in the Agent. Later. | ||||
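| 
 | ||||
|     For example (an illustrative sketch, not output from a real span):: | ||||
| 
 | ||||
|         method = 'GET', url = '/logs-2019.01.01/log/123' | ||||
|         resource -> 'GET /logs-?.?.?/log/?' | ||||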
|     """ | ||||
|     url = span.get_tag(metadata.URL) | ||||
|     method = span.get_tag(metadata.METHOD) | ||||
| 
 | ||||
|     quantized_url = ID_REGEXP.sub(ID_PLACEHOLDER, url) | ||||
|     quantized_url = INDEX_REGEXP.sub(INDEX_PLACEHOLDER, quantized_url) | ||||
| 
 | ||||
|     span.resource = '{method} {url}'.format( | ||||
|         method=method, | ||||
|         url=quantized_url | ||||
|     ) | ||||
| 
 | ||||
|     return span | ||||
|  | @ -0,0 +1,66 @@ | |||
| # DEV: This will import the first available module from: | ||||
| #   `elasticsearch`, `elasticsearch1`, `elasticsearch2`, `elasticsearch5`, `elasticsearch6` | ||||
| from .elasticsearch import elasticsearch | ||||
| 
 | ||||
| from .quantize import quantize | ||||
| 
 | ||||
| from ...utils.deprecation import deprecated | ||||
| from ...compat import urlencode | ||||
| from ...ext import SpanTypes, http, elasticsearch as metadata | ||||
| from ...settings import config | ||||
| 
 | ||||
| DEFAULT_SERVICE = 'elasticsearch' | ||||
| 
 | ||||
| 
 | ||||
| @deprecated(message='Use patching instead (see the docs).', version='1.0.0') | ||||
| def get_traced_transport(datadog_tracer, datadog_service=DEFAULT_SERVICE): | ||||
| 
 | ||||
|     class TracedTransport(elasticsearch.Transport): | ||||
|         """ Extend elasticseach transport layer to allow Datadog | ||||
|             tracer to catch any performed request. | ||||
|         """ | ||||
| 
 | ||||
|         _datadog_tracer = datadog_tracer | ||||
|         _datadog_service = datadog_service | ||||
| 
 | ||||
|         def perform_request(self, method, url, params=None, body=None): | ||||
|             with self._datadog_tracer.trace('elasticsearch.query', span_type=SpanTypes.ELASTICSEARCH) as s: | ||||
|                 # Don't instrument if the trace is not sampled | ||||
|                 if not s.sampled: | ||||
|                     return super(TracedTransport, self).perform_request( | ||||
|                         method, url, params=params, body=body) | ||||
| 
 | ||||
|                 s.service = self._datadog_service | ||||
|                 s.set_tag(metadata.METHOD, method) | ||||
|                 s.set_tag(metadata.URL, url) | ||||
|                 s.set_tag(metadata.PARAMS, urlencode(params)) | ||||
|                 if config.elasticsearch.trace_query_string: | ||||
|                     s.set_tag(http.QUERY_STRING, urlencode(params)) | ||||
|                 if method == 'GET': | ||||
|                     s.set_tag(metadata.BODY, self.serializer.dumps(body)) | ||||
|                 s = quantize(s) | ||||
| 
 | ||||
|                 try: | ||||
|                     result = super(TracedTransport, self).perform_request(method, url, params=params, body=body) | ||||
|                 except elasticsearch.exceptions.TransportError as e: | ||||
|                     s.set_tag(http.STATUS_CODE, e.status_code) | ||||
|                     raise | ||||
| 
 | ||||
|                 status = None | ||||
|                 if isinstance(result, tuple) and len(result) == 2: | ||||
|                     # elasticsearch<2.4; it returns both the status and the body | ||||
|                     status, data = result | ||||
|                 else: | ||||
|                     # elasticsearch>=2.4; internal change for ``Transport.perform_request`` | ||||
|                     # that just returns the body | ||||
|                     data = result | ||||
| 
 | ||||
|                 if status: | ||||
|                     s.set_tag(http.STATUS_CODE, status) | ||||
| 
 | ||||
|                 took = data.get('took') | ||||
|                 if took: | ||||
|                     s.set_metric(metadata.TOOK, int(took)) | ||||
| 
 | ||||
|                 return result | ||||
|     return TracedTransport | ||||
|  | @ -0,0 +1,59 @@ | |||
| """ | ||||
| To trace the falcon web framework, install the trace middleware:: | ||||
| 
 | ||||
|     import falcon | ||||
|     from ddtrace import tracer | ||||
|     from ddtrace.contrib.falcon import TraceMiddleware | ||||
| 
 | ||||
|     mw = TraceMiddleware(tracer, 'my-falcon-app') | ||||
|     falcon.API(middleware=[mw]) | ||||
| 
 | ||||
| You can also use the autopatching functionality:: | ||||
| 
 | ||||
|     import falcon | ||||
|     from ddtrace import tracer, patch | ||||
| 
 | ||||
|     patch(falcon=True) | ||||
| 
 | ||||
|     app = falcon.API() | ||||
| 
 | ||||
| To disable distributed tracing when using autopatching, set the | ||||
| ``DATADOG_FALCON_DISTRIBUTED_TRACING`` environment variable to ``False``. | ||||
| 
 | ||||
| To enable generating APM events for Trace Search & Analytics, set the | ||||
| ``DD_FALCON_ANALYTICS_ENABLED`` environment variable to ``True``. | ||||
| 
 | ||||
| **Supported span hooks** | ||||
| 
 | ||||
| The following is a list of available tracer hooks that can be used to intercept | ||||
| and modify spans created by this integration. | ||||
| 
 | ||||
| - ``request`` | ||||
|     - Called before the response has been finished | ||||
|     - ``def on_falcon_request(span, request, response)`` | ||||
| 
 | ||||
| 
 | ||||
| Example:: | ||||
| 
 | ||||
|     import falcon | ||||
|     from ddtrace import config, patch_all | ||||
|     patch_all() | ||||
| 
 | ||||
|     app = falcon.API() | ||||
| 
 | ||||
|     @config.falcon.hooks.on('request') | ||||
|     def on_falcon_request(span, request, response): | ||||
|         span.set_tag('my.custom', 'tag') | ||||
| 
 | ||||
| :ref:`Headers tracing <http-headers-tracing>` is supported for this integration. | ||||
| """ | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| required_modules = ['falcon'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .middleware import TraceMiddleware | ||||
|         from .patch import patch | ||||
| 
 | ||||
|         __all__ = ['TraceMiddleware', 'patch'] | ||||
|  | @ -0,0 +1,116 @@ | |||
| import sys | ||||
| 
 | ||||
| from ddtrace.ext import SpanTypes, http as httpx | ||||
| from ddtrace.http import store_request_headers, store_response_headers | ||||
| from ddtrace.propagation.http import HTTPPropagator | ||||
| 
 | ||||
| from ...compat import iteritems | ||||
| from ...constants import ANALYTICS_SAMPLE_RATE_KEY | ||||
| from ...settings import config | ||||
| 
 | ||||
| 
 | ||||
| class TraceMiddleware(object): | ||||
| 
 | ||||
|     def __init__(self, tracer, service='falcon', distributed_tracing=True): | ||||
|         # store tracing references | ||||
|         self.tracer = tracer | ||||
|         self.service = service | ||||
|         self._distributed_tracing = distributed_tracing | ||||
| 
 | ||||
|     def process_request(self, req, resp): | ||||
|         if self._distributed_tracing: | ||||
|             # Falcon uppercases all header names. | ||||
|             headers = dict((k.lower(), v) for k, v in iteritems(req.headers)) | ||||
|             propagator = HTTPPropagator() | ||||
|             context = propagator.extract(headers) | ||||
|             # Only activate the new context if there was a trace id extracted | ||||
|             if context.trace_id: | ||||
|                 self.tracer.context_provider.activate(context) | ||||
| 
 | ||||
|         span = self.tracer.trace( | ||||
|             'falcon.request', | ||||
|             service=self.service, | ||||
|             span_type=SpanTypes.WEB, | ||||
|         ) | ||||
| 
 | ||||
|         # set analytics sample rate with global config enabled | ||||
|         span.set_tag( | ||||
|             ANALYTICS_SAMPLE_RATE_KEY, | ||||
|             config.falcon.get_analytics_sample_rate(use_global_config=True) | ||||
|         ) | ||||
| 
 | ||||
|         span.set_tag(httpx.METHOD, req.method) | ||||
|         span.set_tag(httpx.URL, req.url) | ||||
|         if config.falcon.trace_query_string: | ||||
|             span.set_tag(httpx.QUERY_STRING, req.query_string) | ||||
| 
 | ||||
|         # Note: any request header set after this line will not be stored in the span | ||||
|         store_request_headers(req.headers, span, config.falcon) | ||||
| 
 | ||||
|     def process_resource(self, req, resp, resource, params): | ||||
|         span = self.tracer.current_span() | ||||
|         if not span: | ||||
|             return  # unexpected | ||||
|         span.resource = '%s %s' % (req.method, _name(resource)) | ||||
| 
 | ||||
|     def process_response(self, req, resp, resource, req_succeeded=None): | ||||
|         # req_succeeded is not a kwarg in the API, but we need a default to | ||||
|         # support Falcon 1.0, which doesn't provide this argument | ||||
|         span = self.tracer.current_span() | ||||
|         if not span: | ||||
|             return  # unexpected | ||||
| 
 | ||||
|         status = httpx.normalize_status_code(resp.status) | ||||
| 
 | ||||
|         # Note: any response header set after this line will not be stored in the span | ||||
|         store_response_headers(resp._headers, span, config.falcon) | ||||
| 
 | ||||
|         # FIXME[matt] falcon does not map errors or unmatched routes | ||||
|         # to proper status codes, so we have to try to infer them | ||||
|         # here. See https://github.com/falconry/falcon/issues/606 | ||||
|         if resource is None: | ||||
|             status = '404' | ||||
|             span.resource = '%s 404' % req.method | ||||
|             span.set_tag(httpx.STATUS_CODE, status) | ||||
|             span.finish() | ||||
|             return | ||||
| 
 | ||||
|         err_type = sys.exc_info()[0] | ||||
|         if err_type is not None: | ||||
|             if req_succeeded is None: | ||||
|                 # backward-compatibility with Falcon 1.0; any version | ||||
|                 # greater than 1.0 has req_succeeded in [True, False] | ||||
|                 # TODO[manu]: drop the support at some point | ||||
|                 status = _detect_and_set_status_error(err_type, span) | ||||
|             elif req_succeeded is False: | ||||
|                 # Falcon 1.1+ provides that argument, set to False | ||||
|                 # when an Exception is raised (a 404 is still an exception) | ||||
|                 status = _detect_and_set_status_error(err_type, span) | ||||
| 
 | ||||
|         span.set_tag(httpx.STATUS_CODE, status) | ||||
| 
 | ||||
|         # Emit span hook for this response | ||||
|         # DEV: Emit before closing so they can overwrite `span.resource` if they want | ||||
|         config.falcon.hooks._emit('request', span, req, resp) | ||||
| 
 | ||||
|         # Close the span | ||||
|         span.finish() | ||||
| 
 | ||||
| 
 | ||||
| def _is_404(err_type): | ||||
|     return 'HTTPNotFound' in err_type.__name__ | ||||
| 
 | ||||
| 
 | ||||
| def _detect_and_set_status_error(err_type, span): | ||||
|     """Detect the HTTP status code from the current stacktrace and | ||||
|     set the traceback to the given Span | ||||
|     """ | ||||
|     if not _is_404(err_type): | ||||
|         span.set_traceback() | ||||
|         return '500' | ||||
|     elif _is_404(err_type): | ||||
|         return '404' | ||||
| 
 | ||||
| 
 | ||||
| def _name(r): | ||||
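|     # e.g. an instance of a (hypothetical) myapp.resources.UserResource | ||||
|     # yields 'myapp.resources.UserResource' | ||||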
|     return '%s.%s' % (r.__module__, r.__class__.__name__) | ||||
|  | @ -0,0 +1,31 @@ | |||
| import os | ||||
| from ddtrace.vendor import wrapt | ||||
| import falcon | ||||
| 
 | ||||
| from ddtrace import tracer | ||||
| 
 | ||||
| from .middleware import TraceMiddleware | ||||
| from ...utils.formats import asbool, get_env | ||||
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     """ | ||||
|     Patch falcon.API to include contrib.falcon.TraceMiddleware | ||||
|     by default | ||||
|     """ | ||||
|     if getattr(falcon, '_datadog_patch', False): | ||||
|         return | ||||
| 
 | ||||
|     setattr(falcon, '_datadog_patch', True) | ||||
|     wrapt.wrap_function_wrapper('falcon', 'API.__init__', traced_init) | ||||
| 
 | ||||
| 
 | ||||
| def traced_init(wrapped, instance, args, kwargs): | ||||
|     mw = kwargs.pop('middleware', []) | ||||
|     service = os.environ.get('DATADOG_SERVICE_NAME') or 'falcon' | ||||
|     distributed_tracing = asbool(get_env('falcon', 'distributed_tracing', True)) | ||||
| 
 | ||||
|     mw.insert(0, TraceMiddleware(tracer, service, distributed_tracing)) | ||||
|     kwargs['middleware'] = mw | ||||
| 
 | ||||
|     wrapped(*args, **kwargs) | ||||
|  | @ -0,0 +1,113 @@ | |||
| """ | ||||
| The Flask__ integration will add tracing to all requests to your Flask application. | ||||
| 
 | ||||
| This integration will track the entire Flask lifecycle including user-defined endpoints, hooks, | ||||
| signals, and template rendering. | ||||
| 
 | ||||
| To configure tracing manually:: | ||||
| 
 | ||||
|     from ddtrace import patch_all | ||||
|     patch_all() | ||||
| 
 | ||||
|     from flask import Flask | ||||
| 
 | ||||
|     app = Flask(__name__) | ||||
| 
 | ||||
| 
 | ||||
|     @app.route('/') | ||||
|     def index(): | ||||
|         return 'hello world' | ||||
| 
 | ||||
| 
 | ||||
|     if __name__ == '__main__': | ||||
|         app.run() | ||||
| 
 | ||||
| 
 | ||||
| You may also enable Flask tracing automatically via ddtrace-run:: | ||||
| 
 | ||||
|     ddtrace-run python app.py | ||||
| 
 | ||||
| 
 | ||||
| Configuration | ||||
| ~~~~~~~~~~~~~ | ||||
| 
 | ||||
| .. py:data:: ddtrace.config.flask['distributed_tracing_enabled'] | ||||
| 
 | ||||
|    Whether to parse distributed tracing headers from requests received by your Flask app. | ||||
| 
 | ||||
|    Default: ``True`` | ||||
| 
 | ||||
| .. py:data:: ddtrace.config.flask['analytics_enabled'] | ||||
| 
 | ||||
|    Whether to generate APM events for Flask in Trace Search & Analytics. | ||||
| 
 | ||||
|    Can also be enabled with the ``DD_FLASK_ANALYTICS_ENABLED`` environment variable. | ||||
| 
 | ||||
|    Default: ``None`` | ||||
| 
 | ||||
| .. py:data:: ddtrace.config.flask['service_name'] | ||||
| 
 | ||||
|    The service name reported for your Flask app. | ||||
| 
 | ||||
|    Can also be configured via the ``DATADOG_SERVICE_NAME`` environment variable. | ||||
| 
 | ||||
|    Default: ``'flask'`` | ||||
| 
 | ||||
| .. py:data:: ddtrace.config.flask['collect_view_args'] | ||||
| 
 | ||||
|    Whether to add request tags for view function argument values. | ||||
| 
 | ||||
|    Default: ``True`` | ||||
| 
 | ||||
| .. py:data:: ddtrace.config.flask['template_default_name'] | ||||
| 
 | ||||
|    The default template name to use when one does not exist. | ||||
| 
 | ||||
|    Default: ``<memory>`` | ||||
| 
 | ||||
| .. py:data:: ddtrace.config.flask['trace_signals'] | ||||
| 
 | ||||
|    Whether to trace Flask signals (``before_request``, ``after_request``, etc). | ||||
| 
 | ||||
|    Default: ``True`` | ||||
| 
 | ||||
| .. py:data:: ddtrace.config.flask['extra_error_codes'] | ||||
| 
 | ||||
|    A list of response codes that should get marked as errors. | ||||
| 
 | ||||
|    *5xx codes are always considered an error.* | ||||
| 
 | ||||
|    Default: ``[]`` | ||||
| 
 | ||||
| 
 | ||||
| Example:: | ||||
| 
 | ||||
|     from ddtrace import config | ||||
| 
 | ||||
|     # Enable distributed tracing | ||||
|     config.flask['distributed_tracing_enabled'] = True | ||||
| 
 | ||||
|     # Override service name | ||||
|     config.flask['service_name'] = 'custom-service-name' | ||||
| 
 | ||||
|     # Report 401 and 403 responses as errors | ||||
|     config.flask['extra_error_codes'] = [401, 403] | ||||
| 
 | ||||
| .. __: http://flask.pocoo.org/ | ||||
| """ | ||||
| 
 | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| 
 | ||||
| required_modules = ['flask'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         # DEV: We do this so we can `@mock.patch('ddtrace.contrib.flask._patch.<func>')` in tests | ||||
|         from . import patch as _patch | ||||
|         from .middleware import TraceMiddleware | ||||
| 
 | ||||
|         patch = _patch.patch | ||||
|         unpatch = _patch.unpatch | ||||
| 
 | ||||
|         __all__ = ['TraceMiddleware', 'patch', 'unpatch'] | ||||
|  | @ -0,0 +1,44 @@ | |||
| from ddtrace import Pin | ||||
| import flask | ||||
| 
 | ||||
| 
 | ||||
| def get_current_app(): | ||||
|     """Helper to get the flask.app.Flask from the current app context""" | ||||
|     appctx = flask._app_ctx_stack.top | ||||
|     if appctx: | ||||
|         return appctx.app | ||||
|     return None | ||||
| 
 | ||||
| 
 | ||||
| def with_instance_pin(func): | ||||
|     """Helper to wrap a function wrapper and ensure an enabled pin is available for the `instance`""" | ||||
|     def wrapper(wrapped, instance, args, kwargs): | ||||
|         pin = Pin._find(wrapped, instance, get_current_app()) | ||||
|         if not pin or not pin.enabled(): | ||||
|             return wrapped(*args, **kwargs) | ||||
| 
 | ||||
|         return func(pin, wrapped, instance, args, kwargs) | ||||
|     return wrapper | ||||
| 
 | ||||
| 
 | ||||
| def simple_tracer(name, span_type=None): | ||||
|     """Generate a simple tracer that wraps the function call with `with tracer.trace()`""" | ||||
|     @with_instance_pin | ||||
|     def wrapper(pin, wrapped, instance, args, kwargs): | ||||
|         with pin.tracer.trace(name, service=pin.service, span_type=span_type): | ||||
|             return wrapped(*args, **kwargs) | ||||
|     return wrapper | ||||
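| 
 | ||||
| # simple_tracer is used by patch.py to wrap plain Flask methods, e.g.: | ||||
| #   _w('flask', 'send_file', simple_tracer('flask.send_file')) | ||||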
| 
 | ||||
| 
 | ||||
| def get_current_span(pin, root=False): | ||||
|     """Helper to get the current span from the provided pins current call context""" | ||||
|     if not pin or not pin.enabled(): | ||||
|         return None | ||||
| 
 | ||||
|     ctx = pin.tracer.get_call_context() | ||||
|     if not ctx: | ||||
|         return None | ||||
| 
 | ||||
|     if root: | ||||
|         return ctx.get_current_root_span() | ||||
|     return ctx.get_current_span() | ||||
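| 
 | ||||
| 
 | ||||
| # For example (a sketch): get_current_span(pin, root=True) returns the root | ||||
| # span of the active request trace, or None when no trace is active. | ||||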
|  | @ -0,0 +1,208 @@ | |||
| from ... import compat | ||||
| from ...ext import SpanTypes, http, errors | ||||
| from ...internal.logger import get_logger | ||||
| from ...propagation.http import HTTPPropagator | ||||
| from ...utils.deprecation import deprecated | ||||
| 
 | ||||
| import flask.templating | ||||
| from flask import g, request, signals | ||||
| 
 | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| 
 | ||||
| SPAN_NAME = 'flask.request' | ||||
| 
 | ||||
| 
 | ||||
| class TraceMiddleware(object): | ||||
| 
 | ||||
|     @deprecated(message='Use patching instead (see the docs).', version='1.0.0') | ||||
|     def __init__(self, app, tracer, service='flask', use_signals=True, distributed_tracing=False): | ||||
|         self.app = app | ||||
|         log.debug('flask: initializing trace middleware') | ||||
| 
 | ||||
|         # Attach settings to the inner application middleware. This is required if double | ||||
|         # instrumentation happens (i.e. `ddtrace-run` with `TraceMiddleware`). In that | ||||
|         # case, `ddtrace-run` instruments the application, but then user code is unable | ||||
|         # to update settings such as the `distributed_tracing` flag. This step can be | ||||
|         # removed when the `Config` object is used. | ||||
|         self.app._tracer = tracer | ||||
|         self.app._service = service | ||||
|         self.app._use_distributed_tracing = distributed_tracing | ||||
|         self.use_signals = use_signals | ||||
| 
 | ||||
|         # safe-guard to avoid double instrumentation | ||||
|         if getattr(app, '__dd_instrumentation', False): | ||||
|             return | ||||
|         setattr(app, '__dd_instrumentation', True) | ||||
| 
 | ||||
|         # Install hooks which time requests. | ||||
|         self.app.before_request(self._before_request) | ||||
|         self.app.after_request(self._after_request) | ||||
|         self.app.teardown_request(self._teardown_request) | ||||
| 
 | ||||
|         # Add exception handling signals. This will annotate exceptions that | ||||
|         # are caught and handled in custom user code. | ||||
|         # See https://github.com/DataDog/dd-trace-py/issues/390 | ||||
|         if use_signals and not signals.signals_available: | ||||
|             log.debug(_blinker_not_installed_msg) | ||||
|         self.use_signals = use_signals and signals.signals_available | ||||
|         timing_signals = { | ||||
|             'got_request_exception': self._request_exception, | ||||
|         } | ||||
|         self._receivers = [] | ||||
|         if self.use_signals and _signals_exist(timing_signals): | ||||
|             self._connect(timing_signals) | ||||
| 
 | ||||
|         _patch_render(tracer) | ||||
| 
 | ||||
|     def _connect(self, signal_to_handler): | ||||
|         connected = True | ||||
|         for name, handler in signal_to_handler.items(): | ||||
|             s = getattr(signals, name, None) | ||||
|             if not s: | ||||
|                 connected = False | ||||
|                 log.warning('trying to instrument missing signal %s', name) | ||||
|                 continue | ||||
|             # we should connect to the signal without using weak references | ||||
|             # otherwise they will be garbage collected and our handlers | ||||
|             # will be disconnected after the first call; for more details check: | ||||
|             # https://github.com/jek/blinker/blob/207446f2d97/blinker/base.py#L106-L108 | ||||
|             s.connect(handler, sender=self.app, weak=False) | ||||
|             self._receivers.append(handler) | ||||
|         return connected | ||||
| 
 | ||||
|     def _before_request(self): | ||||
|         """ Starts tracing the current request and stores it in the global | ||||
|             request object. | ||||
|         """ | ||||
|         self._start_span() | ||||
| 
 | ||||
|     def _after_request(self, response): | ||||
|         """ Runs after the server can process a response. """ | ||||
|         try: | ||||
|             self._process_response(response) | ||||
|         except Exception: | ||||
|             log.debug('flask: error tracing response', exc_info=True) | ||||
|         return response | ||||
| 
 | ||||
|     def _teardown_request(self, exception): | ||||
|         """ Runs at the end of a request. If there's an unhandled exception, it | ||||
|             will be passed in. | ||||
|         """ | ||||
|         # when we teardown the span, ensure we have a clean slate. | ||||
|         span = getattr(g, 'flask_datadog_span', None) | ||||
|         setattr(g, 'flask_datadog_span', None) | ||||
|         if not span: | ||||
|             return | ||||
| 
 | ||||
|         try: | ||||
|             self._finish_span(span, exception=exception) | ||||
|         except Exception: | ||||
|             log.debug('flask: error finishing span', exc_info=True) | ||||
| 
 | ||||
|     def _start_span(self): | ||||
|         if self.app._use_distributed_tracing: | ||||
|             propagator = HTTPPropagator() | ||||
|             context = propagator.extract(request.headers) | ||||
|             # Only need to activate the new context if something was propagated | ||||
|             if context.trace_id: | ||||
|                 self.app._tracer.context_provider.activate(context) | ||||
|         try: | ||||
|             g.flask_datadog_span = self.app._tracer.trace( | ||||
|                 SPAN_NAME, | ||||
|                 service=self.app._service, | ||||
|                 span_type=SpanTypes.WEB, | ||||
|             ) | ||||
|         except Exception: | ||||
|             log.debug('flask: error tracing request', exc_info=True) | ||||
| 
 | ||||
|     def _process_response(self, response): | ||||
|         span = getattr(g, 'flask_datadog_span', None) | ||||
|         if not (span and span.sampled): | ||||
|             return | ||||
| 
 | ||||
|         code = response.status_code if response else '' | ||||
|         span.set_tag(http.STATUS_CODE, code) | ||||
| 
 | ||||
|     def _request_exception(self, *args, **kwargs): | ||||
|         exception = kwargs.get('exception', None) | ||||
|         span = getattr(g, 'flask_datadog_span', None) | ||||
|         if span and exception: | ||||
|             _set_error_on_span(span, exception) | ||||
| 
 | ||||
|     def _finish_span(self, span, exception=None): | ||||
|         if not span or not span.sampled: | ||||
|             return | ||||
| 
 | ||||
|         code = span.get_tag(http.STATUS_CODE) or 0 | ||||
|         try: | ||||
|             code = int(code) | ||||
|         except Exception: | ||||
|             code = 0 | ||||
| 
 | ||||
|         if exception: | ||||
|             # if the request has already had a code set, don't override it. | ||||
|             code = code or 500 | ||||
|             _set_error_on_span(span, exception) | ||||
| 
 | ||||
|         # mark 5xx status codes as errors on the span | ||||
|         span.error = 0 if code < 500 else 1 | ||||
| 
 | ||||
|         # the request isn't guaranteed to exist here, so only use it carefully. | ||||
|         method = '' | ||||
|         endpoint = '' | ||||
|         url = '' | ||||
|         if request: | ||||
|             method = request.method | ||||
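|             # request.endpoint is None when an exception happened, so we | ||||
|             # fall back to the status code | ||||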
|             endpoint = request.endpoint or code | ||||
|             url = request.base_url or '' | ||||
| 
 | ||||
|         # Let users specify their own resource in middleware if they so desire. | ||||
|         # See case https://github.com/DataDog/dd-trace-py/issues/353 | ||||
|         if span.resource == SPAN_NAME: | ||||
|             resource = endpoint or code | ||||
|             span.resource = compat.to_unicode(resource).lower() | ||||
| 
 | ||||
|         span.set_tag(http.URL, compat.to_unicode(url)) | ||||
|         span.set_tag(http.STATUS_CODE, code) | ||||
|         span.set_tag(http.METHOD, method) | ||||
|         span.finish() | ||||
| 
 | ||||
| 
 | ||||
| def _set_error_on_span(span, exception): | ||||
|     # The 3 next lines might not be strictly required, since `set_traceback` | ||||
|     # also gets the exception from sys.exc_info (and fills the error meta). | ||||
|     # Since we aren't sure it always works, and to ensure no BC break, keep | ||||
|     # these lines, which get overridden anyway. | ||||
|     span.set_tag(errors.ERROR_TYPE, type(exception)) | ||||
|     span.set_tag(errors.ERROR_MSG, exception) | ||||
|     # The provided `exception` object doesn't have a stack trace attached, | ||||
|     # so attach the stack trace with `set_traceback`. | ||||
|     span.set_traceback() | ||||
| 
 | ||||
| 
 | ||||
| def _patch_render(tracer): | ||||
|     """ patch flask's render template methods with the given tracer. """ | ||||
|     # fall back to patching  global method | ||||
|     _render = flask.templating._render | ||||
| 
 | ||||
|     def _traced_render(template, context, app): | ||||
|         with tracer.trace('flask.template', span_type=SpanTypes.TEMPLATE) as span: | ||||
|             span.set_tag('flask.template', template.name or 'string') | ||||
|             return _render(template, context, app) | ||||
| 
 | ||||
|     flask.templating._render = _traced_render | ||||
| 
 | ||||
| 
 | ||||
| def _signals_exist(names): | ||||
|     """ Return true if all of the given signals exist in this version of flask. | ||||
|     """ | ||||
|     return all(getattr(signals, n, False) for n in names) | ||||
| 
 | ||||
| 
 | ||||
| _blinker_not_installed_msg = ( | ||||
|     'please install blinker to use flask signals. ' | ||||
|     'http://flask.pocoo.org/docs/0.11/signals/' | ||||
| ) | ||||
|  | @ -0,0 +1,497 @@ | |||
| import os | ||||
| 
 | ||||
| import flask | ||||
| import werkzeug | ||||
| from ddtrace.vendor.wrapt import wrap_function_wrapper as _w | ||||
| 
 | ||||
| from ddtrace import compat | ||||
| from ddtrace import config, Pin | ||||
| 
 | ||||
| from ...constants import ANALYTICS_SAMPLE_RATE_KEY | ||||
| from ...ext import SpanTypes, http | ||||
| from ...internal.logger import get_logger | ||||
| from ...propagation.http import HTTPPropagator | ||||
| from ...utils.wrappers import unwrap as _u | ||||
| from .helpers import get_current_app, get_current_span, simple_tracer, with_instance_pin | ||||
| from .wrappers import wrap_function, wrap_signal | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| FLASK_ENDPOINT = 'flask.endpoint' | ||||
| FLASK_VIEW_ARGS = 'flask.view_args' | ||||
| FLASK_URL_RULE = 'flask.url_rule' | ||||
| FLASK_VERSION = 'flask.version' | ||||
| 
 | ||||
| # Configure default configuration | ||||
| config._add('flask', dict( | ||||
|     # Flask service configuration | ||||
|     # DEV: Environment variable 'DATADOG_SERVICE_NAME' used for backwards compatibility | ||||
|     service_name=os.environ.get('DATADOG_SERVICE_NAME') or 'flask', | ||||
|     app='flask', | ||||
| 
 | ||||
|     collect_view_args=True, | ||||
|     distributed_tracing_enabled=True, | ||||
|     template_default_name='<memory>', | ||||
|     trace_signals=True, | ||||
| 
 | ||||
|     # We mark 5xx responses as errors, these codes are additional status codes to mark as errors | ||||
|     # DEV: This is so that if a user wants to see `401` or `403` as an error, they can configure that | ||||
|     extra_error_codes=set(), | ||||
| )) | ||||
| 
 | ||||
| 
 | ||||
| # Extract flask version into a tuple e.g. (0, 12, 1) or (1, 0, 2) | ||||
| # DEV: This makes it so we can do `if flask_version >= (0, 12, 0):` | ||||
| # DEV: Example tests: | ||||
| #      (0, 10, 0) > (0, 10) | ||||
| #      (0, 10, 0) >= (0, 10, 0) | ||||
| #      (0, 10, 1) >= (0, 10) | ||||
| #      (0, 11, 1) >= (0, 10) | ||||
| #      (0, 11, 1) >= (0, 10, 2) | ||||
| #      (1, 0, 0) >= (0, 10) | ||||
| #      (0, 9) == (0, 9) | ||||
| #      (0, 9, 0) != (0, 9) | ||||
| #      (0, 8, 5) <= (0, 9) | ||||
| flask_version_str = getattr(flask, '__version__', '0.0.0') | ||||
| flask_version = tuple([int(i) for i in flask_version_str.split('.')]) | ||||
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     """ | ||||
|     Patch `flask` module for tracing | ||||
|     """ | ||||
|     # Check to see if we have patched Flask yet or not | ||||
|     if getattr(flask, '_datadog_patch', False): | ||||
|         return | ||||
|     setattr(flask, '_datadog_patch', True) | ||||
| 
 | ||||
|     # Attach service pin to `flask.app.Flask` | ||||
|     Pin( | ||||
|         service=config.flask['service_name'], | ||||
|         app=config.flask['app'] | ||||
|     ).onto(flask.Flask) | ||||
| 
 | ||||
|     # flask.app.Flask methods that have custom tracing (add metadata, wrap functions, etc) | ||||
|     _w('flask', 'Flask.wsgi_app', traced_wsgi_app) | ||||
|     _w('flask', 'Flask.dispatch_request', request_tracer('dispatch_request')) | ||||
|     _w('flask', 'Flask.preprocess_request', request_tracer('preprocess_request')) | ||||
|     _w('flask', 'Flask.add_url_rule', traced_add_url_rule) | ||||
|     _w('flask', 'Flask.endpoint', traced_endpoint) | ||||
|     _w('flask', 'Flask._register_error_handler', traced_register_error_handler) | ||||
| 
 | ||||
|     # flask.blueprints.Blueprint methods that have custom tracing (add metadata, wrap functions, etc) | ||||
|     _w('flask', 'Blueprint.register', traced_blueprint_register) | ||||
|     _w('flask', 'Blueprint.add_url_rule', traced_blueprint_add_url_rule) | ||||
| 
 | ||||
|     # flask.app.Flask traced hook decorators | ||||
|     flask_hooks = [ | ||||
|         'before_request', | ||||
|         'before_first_request', | ||||
|         'after_request', | ||||
|         'teardown_request', | ||||
|         'teardown_appcontext', | ||||
|     ] | ||||
|     for hook in flask_hooks: | ||||
|         _w('flask', 'Flask.{}'.format(hook), traced_flask_hook) | ||||
|     _w('flask', 'after_this_request', traced_flask_hook) | ||||
| 
 | ||||
|     # flask.app.Flask traced methods | ||||
|     flask_app_traces = [ | ||||
|         'process_response', | ||||
|         'handle_exception', | ||||
|         'handle_http_exception', | ||||
|         'handle_user_exception', | ||||
|         'try_trigger_before_first_request_functions', | ||||
|         'do_teardown_request', | ||||
|         'do_teardown_appcontext', | ||||
|         'send_static_file', | ||||
|     ] | ||||
|     for name in flask_app_traces: | ||||
|         _w('flask', 'Flask.{}'.format(name), simple_tracer('flask.{}'.format(name))) | ||||
| 
 | ||||
|     # flask static file helpers | ||||
|     _w('flask', 'send_file', simple_tracer('flask.send_file')) | ||||
| 
 | ||||
|     # flask.json.jsonify | ||||
|     _w('flask', 'jsonify', traced_jsonify) | ||||
| 
 | ||||
|     # flask.templating traced functions | ||||
|     _w('flask.templating', '_render', traced_render) | ||||
|     _w('flask', 'render_template', traced_render_template) | ||||
|     _w('flask', 'render_template_string', traced_render_template_string) | ||||
| 
 | ||||
|     # flask.blueprints.Blueprint traced hook decorators | ||||
|     bp_hooks = [ | ||||
|         'after_app_request', | ||||
|         'after_request', | ||||
|         'before_app_first_request', | ||||
|         'before_app_request', | ||||
|         'before_request', | ||||
|         'teardown_request', | ||||
|         'teardown_app_request', | ||||
|     ] | ||||
|     for hook in bp_hooks: | ||||
|         _w('flask', 'Blueprint.{}'.format(hook), traced_flask_hook) | ||||
| 
 | ||||
|     # flask.signals signals | ||||
|     if config.flask['trace_signals']: | ||||
|         signals = [ | ||||
|             'template_rendered', | ||||
|             'request_started', | ||||
|             'request_finished', | ||||
|             'request_tearing_down', | ||||
|             'got_request_exception', | ||||
|             'appcontext_tearing_down', | ||||
|         ] | ||||
|         # These were added in 0.11.0 | ||||
|         if flask_version >= (0, 11): | ||||
|             signals.append('before_render_template') | ||||
| 
 | ||||
|         # These were added in 0.10.0 | ||||
|         if flask_version >= (0, 10): | ||||
|             signals.append('appcontext_pushed') | ||||
|             signals.append('appcontext_popped') | ||||
|             signals.append('message_flashed') | ||||
| 
 | ||||
|         for signal in signals: | ||||
|             module = 'flask' | ||||
| 
 | ||||
|             # v0.9 missed importing `appcontext_tearing_down` in `flask/__init__.py` | ||||
|             #  https://github.com/pallets/flask/blob/0.9/flask/__init__.py#L35-L37 | ||||
|             #  https://github.com/pallets/flask/blob/0.9/flask/signals.py#L52 | ||||
|             # DEV: Version 0.9 doesn't have a patch version | ||||
|             if flask_version <= (0, 9) and signal == 'appcontext_tearing_down': | ||||
|                 module = 'flask.signals' | ||||
| 
 | ||||
|             # DEV: Patch `receivers_for` instead of `connect` to ensure we don't mess with `disconnect` | ||||
|             _w(module, '{}.receivers_for'.format(signal), traced_signal_receivers_for(signal)) | ||||
| 
 | ||||
| 
 | ||||
| def unpatch(): | ||||
|     if not getattr(flask, '_datadog_patch', False): | ||||
|         return | ||||
|     setattr(flask, '_datadog_patch', False) | ||||
| 
 | ||||
|     props = [ | ||||
|         # Flask | ||||
|         'Flask.wsgi_app', | ||||
|         'Flask.dispatch_request', | ||||
|         'Flask.add_url_rule', | ||||
|         'Flask.endpoint', | ||||
|         'Flask._register_error_handler', | ||||
| 
 | ||||
|         'Flask.preprocess_request', | ||||
|         'Flask.process_response', | ||||
|         'Flask.handle_exception', | ||||
|         'Flask.handle_http_exception', | ||||
|         'Flask.handle_user_exception', | ||||
|         'Flask.try_trigger_before_first_request_functions', | ||||
|         'Flask.do_teardown_request', | ||||
|         'Flask.do_teardown_appcontext', | ||||
|         'Flask.send_static_file', | ||||
| 
 | ||||
|         # Flask Hooks | ||||
|         'Flask.before_request', | ||||
|         'Flask.before_first_request', | ||||
|         'Flask.after_request', | ||||
|         'Flask.teardown_request', | ||||
|         'Flask.teardown_appcontext', | ||||
| 
 | ||||
|         # Blueprint | ||||
|         'Blueprint.register', | ||||
|         'Blueprint.add_url_rule', | ||||
| 
 | ||||
|         # Blueprint Hooks | ||||
|         'Blueprint.after_app_request', | ||||
|         'Blueprint.after_request', | ||||
|         'Blueprint.before_app_first_request', | ||||
|         'Blueprint.before_app_request', | ||||
|         'Blueprint.before_request', | ||||
|         'Blueprint.teardown_request', | ||||
|         'Blueprint.teardown_app_request', | ||||
| 
 | ||||
|         # Signals | ||||
|         'template_rendered.receivers_for', | ||||
|         'request_started.receivers_for', | ||||
|         'request_finished.receivers_for', | ||||
|         'request_tearing_down.receivers_for', | ||||
|         'got_request_exception.receivers_for', | ||||
|         'appcontext_tearing_down.receivers_for', | ||||
| 
 | ||||
|         # Top level props | ||||
|         'after_this_request', | ||||
|         'send_file', | ||||
|         'jsonify', | ||||
|         'render_template', | ||||
|         'render_template_string', | ||||
|         'templating._render', | ||||
|     ] | ||||
| 
 | ||||
|     # These were added in 0.11.0 | ||||
|     if flask_version >= (0, 11): | ||||
|         props.append('before_render_template.receivers_for') | ||||
| 
 | ||||
|     # These were added in 0.10.0 | ||||
|     if flask_version >= (0, 10): | ||||
|         props.append('appcontext_pushed.receivers_for') | ||||
|         props.append('appcontext_popped.receivers_for') | ||||
|         props.append('message_flashed.receivers_for') | ||||
| 
 | ||||
|     for prop in props: | ||||
|         # Handle 'flask.request_started.receivers_for' | ||||
|         obj = flask | ||||
| 
 | ||||
|         # v0.9.0 missed importing `appcontext_tearing_down` in `flask/__init__.py` | ||||
|         #  https://github.com/pallets/flask/blob/0.9/flask/__init__.py#L35-L37 | ||||
|         #  https://github.com/pallets/flask/blob/0.9/flask/signals.py#L52 | ||||
|         # DEV: Version 0.9 doesn't have a patch version | ||||
|         if flask_version <= (0, 9) and prop == 'appcontext_tearing_down.receivers_for': | ||||
|             obj = flask.signals | ||||
| 
 | ||||
|         if '.' in prop: | ||||
|             attr, _, prop = prop.partition('.') | ||||
|             obj = getattr(obj, attr, object()) | ||||
|         _u(obj, prop) | ||||
| 
 | ||||
| 
 | ||||
| @with_instance_pin | ||||
| def traced_wsgi_app(pin, wrapped, instance, args, kwargs): | ||||
|     """ | ||||
|     Wrapper for flask.app.Flask.wsgi_app | ||||
| 
 | ||||
|     This wrapper is the starting point for all requests. | ||||
|     """ | ||||
|     # DEV: This is safe because these are the args for a WSGI handler | ||||
|     #   https://www.python.org/dev/peps/pep-3333/ | ||||
|     environ, start_response = args | ||||
| 
 | ||||
|     # Create a werkzeug request from the `environ` to make interacting with it easier | ||||
|     # DEV: This executes before a request context is created | ||||
|     request = werkzeug.Request(environ) | ||||
| 
 | ||||
|     # Configure distributed tracing | ||||
|     if config.flask.get('distributed_tracing_enabled', False): | ||||
|         propagator = HTTPPropagator() | ||||
|         context = propagator.extract(request.headers) | ||||
|         # Only need to activate the new context if something was propagated | ||||
|         if context.trace_id: | ||||
|             pin.tracer.context_provider.activate(context) | ||||
| 
 | ||||
|     # Default resource is method and path: | ||||
|     #   GET / | ||||
|     #   POST /save | ||||
|     # We will override this below in `traced_dispatch_request` when we have a `RequestContext` and possibly a url rule | ||||
|     resource = u'{} {}'.format(request.method, request.path) | ||||
|     with pin.tracer.trace('flask.request', service=pin.service, resource=resource, span_type=SpanTypes.WEB) as s: | ||||
|         # set analytics sample rate with global config enabled | ||||
|         sample_rate = config.flask.get_analytics_sample_rate(use_global_config=True) | ||||
|         if sample_rate is not None: | ||||
|             s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate) | ||||
| 
 | ||||
|         s.set_tag(FLASK_VERSION, flask_version_str) | ||||
| 
 | ||||
|         # Wrap the `start_response` handler to extract response code | ||||
|         # DEV: We tried using `Flask.finalize_request`, which seemed to work, but gave us hell during tests | ||||
|         # DEV: The downside to using `start_response` is we do not have a `Flask.Response` object here, | ||||
|         #   only `status_code`, and `headers` to work with | ||||
|         #   On the bright side, this works in all versions of Flask (or any WSGI app actually) | ||||
|         def _wrap_start_response(func): | ||||
|             def traced_start_response(status_code, headers): | ||||
|                 code, _, _ = status_code.partition(' ') | ||||
|                 try: | ||||
|                     code = int(code) | ||||
|                 except ValueError: | ||||
|                     pass | ||||
| 
 | ||||
|                 # Override root span resource name to be `<method> 404` for 404 requests | ||||
|                 # DEV: We do this because we want to make it easier to see all unknown requests together | ||||
|                 #      Also, we do this to reduce the cardinality on unknown urls | ||||
|                 # DEV: If we have an endpoint or url rule tag, then we don't need to do this, | ||||
|                 #      we still want `GET /product/<int:product_id>` grouped together, | ||||
|                 #      even if it is a 404 | ||||
|                 if not s.get_tag(FLASK_ENDPOINT) and not s.get_tag(FLASK_URL_RULE): | ||||
|                     s.resource = u'{} {}'.format(request.method, code) | ||||
| 
 | ||||
|                 s.set_tag(http.STATUS_CODE, code) | ||||
|                 if 500 <= code < 600: | ||||
|                     s.error = 1 | ||||
|                 elif code in config.flask.get('extra_error_codes', set()): | ||||
|                     s.error = 1 | ||||
|                 return func(status_code, headers) | ||||
|             return traced_start_response | ||||
|         start_response = _wrap_start_response(start_response) | ||||
| 
 | ||||
|         # DEV: We set response status code in `_wrap_start_response` | ||||
|         # DEV: Use `request.base_url` and not `request.url` to keep from leaking any query string parameters | ||||
|         s.set_tag(http.URL, request.base_url) | ||||
|         s.set_tag(http.METHOD, request.method) | ||||
|         if config.flask.trace_query_string: | ||||
|             s.set_tag(http.QUERY_STRING, compat.to_unicode(request.query_string)) | ||||
| 
 | ||||
|         return wrapped(environ, start_response) | ||||
| 
 | ||||
| 
 | ||||
| def traced_blueprint_register(wrapped, instance, args, kwargs): | ||||
|     """ | ||||
|     Wrapper for flask.blueprints.Blueprint.register | ||||
| 
 | ||||
|     This wrapper just ensures the blueprint has a pin, either set manually on | ||||
|     it by the user or inherited from the application | ||||
|     """ | ||||
|     app = kwargs.get('app', args[0]) | ||||
|     # Check if this Blueprint has a pin, otherwise clone the one from the app onto it | ||||
|     pin = Pin.get_from(instance) | ||||
|     if not pin: | ||||
|         pin = Pin.get_from(app) | ||||
|         if pin: | ||||
|             pin.clone().onto(instance) | ||||
|     return wrapped(*args, **kwargs) | ||||
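| 
 | ||||
| # A hedged usage sketch (names are illustrative, not part of this module): | ||||
| # a Blueprint can be given its own Pin before registration, so its views are | ||||
| # traced under a dedicated service instead of inheriting the app's Pin. | ||||
| def _example_pin_blueprint(app, blueprint): | ||||
|     # 'users-service' is a hypothetical service name | ||||
|     Pin.override(blueprint, service='users-service') | ||||
|     app.register_blueprint(blueprint) | ||||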
| 
 | ||||
| 
 | ||||
| def traced_blueprint_add_url_rule(wrapped, instance, args, kwargs): | ||||
|     pin = Pin._find(wrapped, instance) | ||||
|     if not pin: | ||||
|         return wrapped(*args, **kwargs) | ||||
| 
 | ||||
|     def _wrap(rule, endpoint=None, view_func=None, **kwargs): | ||||
|         if view_func: | ||||
|             pin.clone().onto(view_func) | ||||
|         return wrapped(rule, endpoint=endpoint, view_func=view_func, **kwargs) | ||||
| 
 | ||||
|     return _wrap(*args, **kwargs) | ||||
| 
 | ||||
| 
 | ||||
| def traced_add_url_rule(wrapped, instance, args, kwargs): | ||||
|     """Wrapper for flask.app.Flask.add_url_rule to wrap all views attached to this app""" | ||||
|     def _wrap(rule, endpoint=None, view_func=None, **kwargs): | ||||
|         if view_func: | ||||
|             # TODO: `if hasattr(view_func, 'view_class')` then this was generated from a `flask.views.View` | ||||
|             #   should we do something special with these views? Change the name/resource? Add tags? | ||||
|             view_func = wrap_function(instance, view_func, name=endpoint, resource=rule) | ||||
| 
 | ||||
|         return wrapped(rule, endpoint=endpoint, view_func=view_func, **kwargs) | ||||
| 
 | ||||
|     return _wrap(*args, **kwargs) | ||||
| 
 | ||||
| 
 | ||||
| def traced_endpoint(wrapped, instance, args, kwargs): | ||||
|     """Wrapper for flask.app.Flask.endpoint to ensure all endpoints are wrapped""" | ||||
|     endpoint = kwargs.get('endpoint', args[0]) | ||||
| 
 | ||||
|     def _wrapper(func): | ||||
|         # DEV: `wrap_function` will call `func_name(func)` for us | ||||
|         return wrapped(endpoint)(wrap_function(instance, func, resource=endpoint)) | ||||
|     return _wrapper | ||||
| 
 | ||||
| 
 | ||||
| def traced_flask_hook(wrapped, instance, args, kwargs): | ||||
|     """Wrapper for hook functions (before_request, after_request, etc) are properly traced""" | ||||
|     func = kwargs.get('f', args[0]) | ||||
|     return wrapped(wrap_function(instance, func)) | ||||
| 
 | ||||
| 
 | ||||
| def traced_render_template(wrapped, instance, args, kwargs): | ||||
|     """Wrapper for flask.templating.render_template""" | ||||
|     pin = Pin._find(wrapped, instance, get_current_app()) | ||||
|     if not pin or not pin.enabled(): | ||||
|         return wrapped(*args, **kwargs) | ||||
| 
 | ||||
|     with pin.tracer.trace('flask.render_template', span_type=SpanTypes.TEMPLATE): | ||||
|         return wrapped(*args, **kwargs) | ||||
| 
 | ||||
| 
 | ||||
| def traced_render_template_string(wrapped, instance, args, kwargs): | ||||
|     """Wrapper for flask.templating.render_template_string""" | ||||
|     pin = Pin._find(wrapped, instance, get_current_app()) | ||||
|     if not pin or not pin.enabled(): | ||||
|         return wrapped(*args, **kwargs) | ||||
| 
 | ||||
|     with pin.tracer.trace('flask.render_template_string', span_type=SpanTypes.TEMPLATE): | ||||
|         return wrapped(*args, **kwargs) | ||||
| 
 | ||||
| 
 | ||||
| def traced_render(wrapped, instance, args, kwargs): | ||||
|     """ | ||||
|     Wrapper for flask.templating._render | ||||
| 
 | ||||
|     This wrapper is used for setting template tags on the span. | ||||
| 
 | ||||
|     This method is called for render_template or render_template_string | ||||
|     """ | ||||
|     pin = Pin._find(wrapped, instance, get_current_app()) | ||||
|     # DEV: `get_current_span` will verify `pin` is valid and enabled first | ||||
|     span = get_current_span(pin) | ||||
|     if not span: | ||||
|         return wrapped(*args, **kwargs) | ||||
| 
 | ||||
|     def _wrap(template, context, app): | ||||
|         name = getattr(template, 'name', None) or config.flask.get('template_default_name') | ||||
|         span.resource = name | ||||
|         span.set_tag('flask.template_name', name) | ||||
|         return wrapped(*args, **kwargs) | ||||
|     return _wrap(*args, **kwargs) | ||||
| 
 | ||||
| 
 | ||||
| def traced_register_error_handler(wrapped, instance, args, kwargs): | ||||
|     """Wrapper to trace all functions registered with flask.app.register_error_handler""" | ||||
|     def _wrap(key, code_or_exception, f): | ||||
|         return wrapped(key, code_or_exception, wrap_function(instance, f)) | ||||
|     return _wrap(*args, **kwargs) | ||||
| 
 | ||||
| 
 | ||||
| def request_tracer(name): | ||||
|     @with_instance_pin | ||||
|     def _traced_request(pin, wrapped, instance, args, kwargs): | ||||
|         """ | ||||
|         Wrapper to trace a Flask function while trying to extract endpoint information | ||||
|           (endpoint, url_rule, view_args, etc) | ||||
| 
 | ||||
|         This wrapper will add identifier tags to the current span from `flask.app.Flask.wsgi_app`. | ||||
|         """ | ||||
|         span = get_current_span(pin) | ||||
|         if not span: | ||||
|             return wrapped(*args, **kwargs) | ||||
| 
 | ||||
|         try: | ||||
|             request = flask._request_ctx_stack.top.request | ||||
| 
 | ||||
|             # DEV: This name will include the blueprint name as well (e.g. `bp.index`) | ||||
|             if not span.get_tag(FLASK_ENDPOINT) and request.endpoint: | ||||
|                 span.resource = u'{} {}'.format(request.method, request.endpoint) | ||||
|                 span.set_tag(FLASK_ENDPOINT, request.endpoint) | ||||
| 
 | ||||
|             if not span.get_tag(FLASK_URL_RULE) and request.url_rule and request.url_rule.rule: | ||||
|                 span.resource = u'{} {}'.format(request.method, request.url_rule.rule) | ||||
|                 span.set_tag(FLASK_URL_RULE, request.url_rule.rule) | ||||
| 
 | ||||
|             if not span.get_tag(FLASK_VIEW_ARGS) and request.view_args and config.flask.get('collect_view_args'): | ||||
|                 for k, v in request.view_args.items(): | ||||
|                     span.set_tag(u'{}.{}'.format(FLASK_VIEW_ARGS, k), v) | ||||
|         except Exception: | ||||
|             log.debug('failed to set tags for "flask.request" span', exc_info=True) | ||||
| 
 | ||||
|         with pin.tracer.trace('flask.{}'.format(name), service=pin.service): | ||||
|             return wrapped(*args, **kwargs) | ||||
|     return _traced_request | ||||
| 
 | ||||
| 
 | ||||
| def traced_signal_receivers_for(signal): | ||||
|     """Wrapper for flask.signals.{signal}.receivers_for to ensure all signal receivers are traced""" | ||||
|     def outer(wrapped, instance, args, kwargs): | ||||
|         sender = kwargs.get('sender', args[0]) | ||||
|         # See if they gave us the flask.app.Flask as the sender | ||||
|         app = None | ||||
|         if isinstance(sender, flask.Flask): | ||||
|             app = sender | ||||
|         for receiver in wrapped(*args, **kwargs): | ||||
|             yield wrap_signal(app, signal, receiver) | ||||
|     return outer | ||||
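| 
 | ||||
| # A hedged sketch (the receiver below is hypothetical): once `receivers_for` | ||||
| # is patched, receivers connected to a Flask signal are yielded wrapped, so | ||||
| # each receiver call produces a span tagged with the signal name (see | ||||
| # wrap_signal in wrappers.py). | ||||
| def _example_signal_receiver(sender, **extra): | ||||
|     # after patching, calls dispatched to this receiver are traced, e.g. via | ||||
|     #   flask.request_started.connect(_example_signal_receiver) | ||||
|     pass | ||||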
| 
 | ||||
| 
 | ||||
| def traced_jsonify(wrapped, instance, args, kwargs): | ||||
|     pin = Pin._find(wrapped, instance, get_current_app()) | ||||
|     if not pin or not pin.enabled(): | ||||
|         return wrapped(*args, **kwargs) | ||||
| 
 | ||||
|     with pin.tracer.trace('flask.jsonify'): | ||||
|         return wrapped(*args, **kwargs) | ||||
|  | @ -0,0 +1,46 @@ | |||
| from ddtrace.vendor.wrapt import function_wrapper | ||||
| 
 | ||||
| from ...pin import Pin | ||||
| from ...utils.importlib import func_name | ||||
| from .helpers import get_current_app | ||||
| 
 | ||||
| 
 | ||||
| def wrap_function(instance, func, name=None, resource=None): | ||||
|     """ | ||||
|     Helper function to wrap common flask.app.Flask methods. | ||||
| 
 | ||||
|     This helper will first ensure that a Pin is available and enabled before tracing | ||||
|     """ | ||||
|     if not name: | ||||
|         name = func_name(func) | ||||
| 
 | ||||
|     @function_wrapper | ||||
|     def trace_func(wrapped, _instance, args, kwargs): | ||||
|         pin = Pin._find(wrapped, _instance, instance, get_current_app()) | ||||
|         if not pin or not pin.enabled(): | ||||
|             return wrapped(*args, **kwargs) | ||||
|         with pin.tracer.trace(name, service=pin.service, resource=resource): | ||||
|             return wrapped(*args, **kwargs) | ||||
| 
 | ||||
|     return trace_func(func) | ||||
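| 
 | ||||
| # A hedged usage sketch: this is how views and hooks get traced above; the | ||||
| # view, name, and resource below are hypothetical. | ||||
| def _example_wrap_view(app): | ||||
|     def my_view(): | ||||
|         return 'ok' | ||||
|     # calls to the returned function create spans named 'flask.my_view' | ||||
|     # with resource '/example', under the app's Pin | ||||
|     return wrap_function(app, my_view, name='flask.my_view', resource='/example') | ||||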
| 
 | ||||
| 
 | ||||
| def wrap_signal(app, signal, func): | ||||
|     """ | ||||
|     Helper used to wrap signal handlers | ||||
| 
 | ||||
|     We will attempt to find the pin attached to the flask.app.Flask app | ||||
|     """ | ||||
|     name = func_name(func) | ||||
| 
 | ||||
|     @function_wrapper | ||||
|     def trace_func(wrapped, instance, args, kwargs): | ||||
|         pin = Pin._find(wrapped, instance, app, get_current_app()) | ||||
|         if not pin or not pin.enabled(): | ||||
|             return wrapped(*args, **kwargs) | ||||
| 
 | ||||
|         with pin.tracer.trace(name, service=pin.service) as span: | ||||
|             span.set_tag('flask.signal', signal) | ||||
|             return wrapped(*args, **kwargs) | ||||
| 
 | ||||
|     return trace_func(func) | ||||
|  | @ -0,0 +1,44 @@ | |||
| """ | ||||
| The flask cache tracer will track any access to a cache backend. | ||||
| You can use this tracer together with the Flask tracer middleware. | ||||
| 
 | ||||
| To install the tracer, ``from ddtrace import tracer`` needs to be added:: | ||||
| 
 | ||||
|     from ddtrace import tracer | ||||
|     from ddtrace.contrib.flask_cache import get_traced_cache | ||||
| 
 | ||||
| and the tracer needs to be initialized:: | ||||
| 
 | ||||
|     Cache = get_traced_cache(tracer, service='my-flask-cache-app') | ||||
| 
 | ||||
| Here is the end result, in a sample app:: | ||||
| 
 | ||||
|     from flask import Flask | ||||
| 
 | ||||
|     from ddtrace import tracer | ||||
|     from ddtrace.contrib.flask_cache import get_traced_cache | ||||
| 
 | ||||
|     app = Flask(__name__) | ||||
| 
 | ||||
|     # get the traced Cache class | ||||
|     Cache = get_traced_cache(tracer, service='my-flask-cache-app') | ||||
| 
 | ||||
|     # use the Cache as usual with your preferred CACHE_TYPE | ||||
|     cache = Cache(app, config={'CACHE_TYPE': 'simple'}) | ||||
| 
 | ||||
|     def counter(): | ||||
|         # this access is traced | ||||
|         conn_counter = cache.get("conn_counter") | ||||
| 
 | ||||
| """ | ||||
| 
 | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| 
 | ||||
| required_modules = ['flask_cache'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .tracers import get_traced_cache | ||||
| 
 | ||||
|         __all__ = ['get_traced_cache'] | ||||
|  | @ -0,0 +1,146 @@ | |||
| """ | ||||
| Datadog trace code for flask_cache | ||||
| """ | ||||
| 
 | ||||
| # stdlib | ||||
| import logging | ||||
| 
 | ||||
| # project | ||||
| from .utils import _extract_conn_tags, _resource_from_cache_prefix | ||||
| from ...constants import ANALYTICS_SAMPLE_RATE_KEY | ||||
| from ...ext import SpanTypes | ||||
| from ...settings import config | ||||
| 
 | ||||
| # 3rd party | ||||
| from flask.ext.cache import Cache | ||||
| 
 | ||||
| 
 | ||||
| log = logging.getLogger(__name__) | ||||
| 
 | ||||
| DEFAULT_SERVICE = 'flask-cache' | ||||
| 
 | ||||
| # standard tags | ||||
| COMMAND_KEY = 'flask_cache.key' | ||||
| CACHE_BACKEND = 'flask_cache.backend' | ||||
| CONTACT_POINTS = 'flask_cache.contact_points' | ||||
| 
 | ||||
| 
 | ||||
| def get_traced_cache(ddtracer, service=DEFAULT_SERVICE, meta=None): | ||||
|     """ | ||||
|     Return a traced Cache object that behaves exactly like the ``flask.ext.cache.Cache`` class | ||||
|     """ | ||||
| 
 | ||||
|     class TracedCache(Cache): | ||||
|         """ | ||||
|         Traced cache backend that monitors any operations done by flask_cache. Observed actions are: | ||||
|         * get, set, add, delete, clear | ||||
|         * all ``many_`` operations | ||||
|         """ | ||||
|         _datadog_tracer = ddtracer | ||||
|         _datadog_service = service | ||||
|         _datadog_meta = meta | ||||
| 
 | ||||
|         def __trace(self, cmd): | ||||
|             """ | ||||
|             Start a trace with default attributes and tags | ||||
|             """ | ||||
|             # create a new span | ||||
|             s = self._datadog_tracer.trace( | ||||
|                 cmd, | ||||
|                 span_type=SpanTypes.CACHE, | ||||
|                 service=self._datadog_service | ||||
|             ) | ||||
|             # set span tags | ||||
|             s.set_tag(CACHE_BACKEND, self.config.get('CACHE_TYPE')) | ||||
|             s.set_tags(self._datadog_meta) | ||||
|             # set analytics sample rate | ||||
|             s.set_tag( | ||||
|                 ANALYTICS_SAMPLE_RATE_KEY, | ||||
|                 config.flask_cache.get_analytics_sample_rate() | ||||
|             ) | ||||
|             # add connection meta if there is one | ||||
|             if getattr(self.cache, '_client', None): | ||||
|                 try: | ||||
|                     s.set_tags(_extract_conn_tags(self.cache._client)) | ||||
|                 except Exception: | ||||
|                     log.debug('error parsing connection tags', exc_info=True) | ||||
| 
 | ||||
|             return s | ||||
| 
 | ||||
|         def get(self, *args, **kwargs): | ||||
|             """ | ||||
|             Track ``get`` operation | ||||
|             """ | ||||
|             with self.__trace('flask_cache.cmd') as span: | ||||
|                 span.resource = _resource_from_cache_prefix('GET', self.config) | ||||
|                 if len(args) > 0: | ||||
|                     span.set_tag(COMMAND_KEY, args[0]) | ||||
|                 return super(TracedCache, self).get(*args, **kwargs) | ||||
| 
 | ||||
|         def set(self, *args, **kwargs): | ||||
|             """ | ||||
|             Track ``set`` operation | ||||
|             """ | ||||
|             with self.__trace('flask_cache.cmd') as span: | ||||
|                 span.resource = _resource_from_cache_prefix('SET', self.config) | ||||
|                 if len(args) > 0: | ||||
|                     span.set_tag(COMMAND_KEY, args[0]) | ||||
|                 return super(TracedCache, self).set(*args, **kwargs) | ||||
| 
 | ||||
|         def add(self, *args, **kwargs): | ||||
|             """ | ||||
|             Track ``add`` operation | ||||
|             """ | ||||
|             with self.__trace('flask_cache.cmd') as span: | ||||
|                 span.resource = _resource_from_cache_prefix('ADD', self.config) | ||||
|                 if len(args) > 0: | ||||
|                     span.set_tag(COMMAND_KEY, args[0]) | ||||
|                 return super(TracedCache, self).add(*args, **kwargs) | ||||
| 
 | ||||
|         def delete(self, *args, **kwargs): | ||||
|             """ | ||||
|             Track ``delete`` operation | ||||
|             """ | ||||
|             with self.__trace('flask_cache.cmd') as span: | ||||
|                 span.resource = _resource_from_cache_prefix('DELETE', self.config) | ||||
|                 if len(args) > 0: | ||||
|                     span.set_tag(COMMAND_KEY, args[0]) | ||||
|                 return super(TracedCache, self).delete(*args, **kwargs) | ||||
| 
 | ||||
|         def delete_many(self, *args, **kwargs): | ||||
|             """ | ||||
|             Track ``delete_many`` operation | ||||
|             """ | ||||
|             with self.__trace('flask_cache.cmd') as span: | ||||
|                 span.resource = _resource_from_cache_prefix('DELETE_MANY', self.config) | ||||
|                 span.set_tag(COMMAND_KEY, list(args)) | ||||
|                 return super(TracedCache, self).delete_many(*args, **kwargs) | ||||
| 
 | ||||
|         def clear(self, *args, **kwargs): | ||||
|             """ | ||||
|             Track ``clear`` operation | ||||
|             """ | ||||
|             with self.__trace('flask_cache.cmd') as span: | ||||
|                 span.resource = _resource_from_cache_prefix('CLEAR', self.config) | ||||
|                 return super(TracedCache, self).clear(*args, **kwargs) | ||||
| 
 | ||||
|         def get_many(self, *args, **kwargs): | ||||
|             """ | ||||
|             Track ``get_many`` operation | ||||
|             """ | ||||
|             with self.__trace('flask_cache.cmd') as span: | ||||
|                 span.resource = _resource_from_cache_prefix('GET_MANY', self.config) | ||||
|                 span.set_tag(COMMAND_KEY, list(args)) | ||||
|                 return super(TracedCache, self).get_many(*args, **kwargs) | ||||
| 
 | ||||
|         def set_many(self, *args, **kwargs): | ||||
|             """ | ||||
|             Track ``set_many`` operation | ||||
|             """ | ||||
|             with self.__trace('flask_cache.cmd') as span: | ||||
|                 span.resource = _resource_from_cache_prefix('SET_MANY', self.config) | ||||
|                 if len(args) > 0: | ||||
|                     span.set_tag(COMMAND_KEY, list(args[0].keys())) | ||||
|                 return super(TracedCache, self).set_many(*args, **kwargs) | ||||
| 
 | ||||
|     return TracedCache | ||||
|  | @ -0,0 +1,46 @@ | |||
| # project | ||||
| from ...ext import net | ||||
| from ..redis.util import _extract_conn_tags as extract_redis_tags | ||||
| from ..pylibmc.addrs import parse_addresses | ||||
| 
 | ||||
| 
 | ||||
| def _resource_from_cache_prefix(resource, cache): | ||||
|     """ | ||||
|     Combine the resource name with the cache prefix (if any) | ||||
|     """ | ||||
|     if getattr(cache, 'key_prefix', None): | ||||
|         name = '{} {}'.format(resource, cache.key_prefix) | ||||
|     else: | ||||
|         name = resource | ||||
| 
 | ||||
|     # enforce lowercase to make the output nicer to read | ||||
|     return name.lower() | ||||
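| 
 | ||||
| # Illustration (hypothetical values): for an object whose `key_prefix` is | ||||
| # 'users-', _resource_from_cache_prefix('GET', obj) returns 'get users-'; | ||||
| # with no prefix it simply returns 'get'. | ||||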
| 
 | ||||
| 
 | ||||
| def _extract_conn_tags(client): | ||||
|     """ | ||||
|     Extract connection tags for the given client | ||||
|     """ | ||||
|     tags = {} | ||||
| 
 | ||||
|     if hasattr(client, 'servers'): | ||||
|         # Memcached backend supports an address pool | ||||
|         if isinstance(client.servers, list) and len(client.servers) > 0: | ||||
|             # use the first address of the pool as a host because | ||||
|             # the code doesn't expose more information | ||||
|             contact_point = client.servers[0].address | ||||
|             tags[net.TARGET_HOST] = contact_point[0] | ||||
|             tags[net.TARGET_PORT] = contact_point[1] | ||||
|     elif hasattr(client, 'connection_pool'): | ||||
|         # Redis main connection | ||||
|         redis_tags = extract_redis_tags(client.connection_pool.connection_kwargs) | ||||
|         tags.update(**redis_tags) | ||||
|     elif hasattr(client, 'addresses'): | ||||
|         # pylibmc | ||||
|         # FIXME[matt] should we memoize this? | ||||
|         addrs = parse_addresses(client.addresses) | ||||
|         if addrs: | ||||
|             _, host, port, _ = addrs[0] | ||||
|             tags[net.TARGET_PORT] = port | ||||
|             tags[net.TARGET_HOST] = host | ||||
|     return tags | ||||
|  | @ -0,0 +1,29 @@ | |||
| """ | ||||
| The ``futures`` integration propagates the current active Tracing Context | ||||
| between threads. The integration ensures that when operations are executed | ||||
| in a new thread, that thread can continue the previously generated trace. | ||||
| 
 | ||||
| The integration does not automatically trace thread execution, so manual | ||||
| instrumentation or another integration must be activated. Thread propagation | ||||
| is not enabled by default by the ``patch_all()`` method and must be activated | ||||
| as follows:: | ||||
| 
 | ||||
|     from ddtrace import patch, patch_all | ||||
| 
 | ||||
|     patch(futures=True) | ||||
|     # or, when instrumenting all libraries | ||||
|     patch_all(futures=True) | ||||
| """ | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| 
 | ||||
| required_modules = ['concurrent.futures'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .patch import patch, unpatch | ||||
| 
 | ||||
|         __all__ = [ | ||||
|             'patch', | ||||
|             'unpatch', | ||||
|         ] | ||||
|  | @ -0,0 +1,24 @@ | |||
| from concurrent import futures | ||||
| 
 | ||||
| from ddtrace.vendor.wrapt import wrap_function_wrapper as _w | ||||
| 
 | ||||
| from .threading import _wrap_submit | ||||
| from ...utils.wrappers import unwrap as _u | ||||
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     """Enables Context Propagation between threads""" | ||||
|     if getattr(futures, '__datadog_patch', False): | ||||
|         return | ||||
|     setattr(futures, '__datadog_patch', True) | ||||
| 
 | ||||
|     _w('concurrent.futures', 'ThreadPoolExecutor.submit', _wrap_submit) | ||||
| 
 | ||||
| 
 | ||||
| def unpatch(): | ||||
|     """Disables Context Propagation between threads""" | ||||
|     if not getattr(futures, '__datadog_patch', False): | ||||
|         return | ||||
|     setattr(futures, '__datadog_patch', False) | ||||
| 
 | ||||
|     _u(futures.ThreadPoolExecutor, 'submit') | ||||
|  | @ -0,0 +1,46 @@ | |||
| import ddtrace | ||||
| 
 | ||||
| 
 | ||||
| def _wrap_submit(func, instance, args, kwargs): | ||||
|     """ | ||||
|     Wrap the ``Executor`` method used to submit work to be executed in another | ||||
|     thread. This wrapper ensures that a new ``Context`` is created and | ||||
|     properly propagated using an intermediate function. | ||||
|     """ | ||||
|     # If there isn't a currently active context, then do not create one | ||||
|     # DEV: Calling `.active()` when there isn't an active context will create a new context | ||||
|     # DEV: We need to do this in case they are either: | ||||
|     #        - Starting nested futures | ||||
|     #        - Starting futures from outside of an existing context | ||||
|     # | ||||
|     #      In either of these cases we essentially will propagate the wrong context between futures | ||||
|     # | ||||
|     #      The resolution is to not create/propagate a new context if one does not exist, but let the | ||||
|     #      future's thread create the context instead. | ||||
|     current_ctx = None | ||||
|     if ddtrace.tracer.context_provider._has_active_context(): | ||||
|         current_ctx = ddtrace.tracer.context_provider.active() | ||||
| 
 | ||||
|         # If we have a context then make sure we clone it | ||||
|         # DEV: We don't know if the future will finish executing before the parent span finishes | ||||
|         #      so we clone to ensure we properly collect/report the future's spans | ||||
|         current_ctx = current_ctx.clone() | ||||
| 
 | ||||
|     # extract the target function that must be executed in | ||||
|     # a new thread and the `target` arguments | ||||
|     fn = args[0] | ||||
|     fn_args = args[1:] | ||||
|     return func(_wrap_execution, current_ctx, fn, fn_args, kwargs) | ||||
| 
 | ||||
| 
 | ||||
| def _wrap_execution(ctx, fn, args, kwargs): | ||||
|     """ | ||||
|     Intermediate target function that is executed in a new thread; | ||||
|     it receives the original function with its arguments and keyword | ||||
|     arguments, including our tracing ``Context``. The current context | ||||
|     provider activates the context in a thread-local storage | ||||
|     variable because it is outside the asynchronous loop. | ||||
|     """ | ||||
|     if ctx is not None: | ||||
|         ddtrace.tracer.context_provider.activate(ctx) | ||||
|     return fn(*args, **kwargs) | ||||
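| 
 | ||||
| 
 | ||||
| # A hedged usage sketch (assumes the integration is patched, as shown in this | ||||
| # package's docstring): a submitted callable continues the parent trace. | ||||
| def _example_submit(): | ||||
|     import concurrent.futures | ||||
|     with ddtrace.tracer.trace('parent'): | ||||
|         with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: | ||||
|             # the worker runs with a clone of the parent context, so 'child' | ||||
|             # is reported in the same trace as 'parent' | ||||
|             future = executor.submit(lambda: ddtrace.tracer.trace('child').finish()) | ||||
|             future.result() | ||||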
|  | @ -0,0 +1,48 @@ | |||
| """ | ||||
| To trace a request in a ``gevent`` environment, configure the tracer to use the greenlet | ||||
| context provider, rather than the default one that relies on thread-local storage. | ||||
| 
 | ||||
| This allows the tracer to pick up a transaction exactly where it left off as greenlets | ||||
| yield the context to another one. | ||||
| 
 | ||||
| The simplest way to trace a ``gevent`` application is to configure the tracer and | ||||
| patch ``gevent`` **before importing** the library:: | ||||
| 
 | ||||
|     # patch before importing gevent | ||||
|     from ddtrace import patch, tracer | ||||
|     patch(gevent=True) | ||||
| 
 | ||||
|     # use gevent as usual with or without the monkey module | ||||
|     from gevent import monkey; monkey.patch_thread() | ||||
| 
 | ||||
|     def my_parent_function(): | ||||
|         with tracer.trace("web.request") as span: | ||||
|             span.service = "web" | ||||
|             gevent.spawn(worker_function) | ||||
| 
 | ||||
|     def worker_function(): | ||||
|         # then trace its child | ||||
|         with tracer.trace("greenlet.call") as span: | ||||
|             span.service = "greenlet" | ||||
|             ... | ||||
| 
 | ||||
|             with tracer.trace("greenlet.child_call") as child: | ||||
|                 ... | ||||
| """ | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| 
 | ||||
| required_modules = ['gevent'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .provider import GeventContextProvider | ||||
|         from .patch import patch, unpatch | ||||
| 
 | ||||
|         context_provider = GeventContextProvider() | ||||
| 
 | ||||
|         __all__ = [ | ||||
|             'patch', | ||||
|             'unpatch', | ||||
|             'context_provider', | ||||
|         ] | ||||
|  | @ -0,0 +1,58 @@ | |||
| import gevent | ||||
| import gevent.pool as gpool | ||||
| 
 | ||||
| from .provider import CONTEXT_ATTR | ||||
| 
 | ||||
| GEVENT_VERSION = gevent.version_info[0:3] | ||||
| 
 | ||||
| 
 | ||||
| class TracingMixin(object): | ||||
|     def __init__(self, *args, **kwargs): | ||||
|         # get the current Context if available | ||||
|         current_g = gevent.getcurrent() | ||||
|         ctx = getattr(current_g, CONTEXT_ATTR, None) | ||||
| 
 | ||||
|         # create the Greenlet as usual | ||||
|         super(TracingMixin, self).__init__(*args, **kwargs) | ||||
| 
 | ||||
|         # the context is always available, except for the main greenlet | ||||
|         if ctx: | ||||
|             # create a new context that inherits the current active span | ||||
|             new_ctx = ctx.clone() | ||||
|             setattr(self, CONTEXT_ATTR, new_ctx) | ||||
| 
 | ||||
| 
 | ||||
| class TracedGreenlet(TracingMixin, gevent.Greenlet): | ||||
|     """ | ||||
|     ``Greenlet`` class that is used to replace the original ``gevent`` | ||||
|     class. This class performs the ``Context`` replacement, so | ||||
|     that any greenlet inherits the context from the parent greenlet. | ||||
|     When a new greenlet is spawned from the main greenlet, a new instance | ||||
|     of ``Context`` is created. The main greenlet is not affected by this behavior. | ||||
| 
 | ||||
|     There is no need to inherit from this class to create or optimize greenlet | ||||
|     instances, because this class replaces ``gevent.greenlet.Greenlet`` | ||||
|     through the ``patch()`` method. After the patch, extending the gevent | ||||
|     ``Greenlet`` class means automatically extending ``TracedGreenlet``. | ||||
|     """ | ||||
|     def __init__(self, *args, **kwargs): | ||||
|         super(TracedGreenlet, self).__init__(*args, **kwargs) | ||||
| 
 | ||||
| 
 | ||||
| class TracedIMapUnordered(TracingMixin, gpool.IMapUnordered): | ||||
|     def __init__(self, *args, **kwargs): | ||||
|         super(TracedIMapUnordered, self).__init__(*args, **kwargs) | ||||
| 
 | ||||
| 
 | ||||
| if GEVENT_VERSION >= (1, 3) or GEVENT_VERSION < (1, 1): | ||||
|     # For gevent <1.1 and >=1.3, IMap is its own class, so we derive | ||||
|     # from TracingMixin | ||||
|     class TracedIMap(TracingMixin, gpool.IMap): | ||||
|         def __init__(self, *args, **kwargs): | ||||
|             super(TracedIMap, self).__init__(*args, **kwargs) | ||||
| else: | ||||
|     # For gevent >=1.1 and <1.3, IMap derives from IMapUnordered, so we derive | ||||
|     # from TracedIMapUnordered and get tracing that way | ||||
|     class TracedIMap(gpool.IMap, TracedIMapUnordered): | ||||
|         def __init__(self, *args, **kwargs): | ||||
|             super(TracedIMap, self).__init__(*args, **kwargs) | ||||
|  | @ -0,0 +1,63 @@ | |||
| import gevent | ||||
| import gevent.pool | ||||
| import ddtrace | ||||
| 
 | ||||
| from .greenlet import TracedGreenlet, TracedIMap, TracedIMapUnordered, GEVENT_VERSION | ||||
| from .provider import GeventContextProvider | ||||
| from ...provider import DefaultContextProvider | ||||
| 
 | ||||
| 
 | ||||
| __Greenlet = gevent.Greenlet | ||||
| __IMap = gevent.pool.IMap | ||||
| __IMapUnordered = gevent.pool.IMapUnordered | ||||
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     """ | ||||
|     Patch the gevent module so that all references to the | ||||
|     internal ``Greenlet`` class point to the ``TracedGreenlet`` | ||||
|     class. | ||||
| 
 | ||||
|     This action ensures that if a user extends the ``Greenlet`` | ||||
|     class, the ``TracedGreenlet`` is used as a parent class. | ||||
|     """ | ||||
|     _replace(TracedGreenlet, TracedIMap, TracedIMapUnordered) | ||||
|     ddtrace.tracer.configure(context_provider=GeventContextProvider()) | ||||
| 
 | ||||
| 
 | ||||
| def unpatch(): | ||||
|     """ | ||||
|     Restore the original ``Greenlet``. This function must be invoked | ||||
|     before executing application code, otherwise the ``TracedGreenlet`` | ||||
|     class may be used during initialization. | ||||
|     """ | ||||
|     _replace(__Greenlet, __IMap, __IMapUnordered) | ||||
|     ddtrace.tracer.configure(context_provider=DefaultContextProvider()) | ||||
| 
 | ||||
| 
 | ||||
| def _replace(g_class, imap_class, imap_unordered_class): | ||||
|     """ | ||||
|     Utility function that replaces the gevent ``Greenlet`` class with the given one. | ||||
|     """ | ||||
|     # replace the original Greenlet classes with the new one | ||||
|     gevent.greenlet.Greenlet = g_class | ||||
| 
 | ||||
|     if GEVENT_VERSION >= (1, 3): | ||||
|         # For gevent >= 1.3.0, IMap and IMapUnordered were pulled out of | ||||
|         # gevent.pool and into gevent._imap | ||||
|         gevent._imap.IMap = imap_class | ||||
|         gevent._imap.IMapUnordered = imap_unordered_class | ||||
|         gevent.pool.IMap = gevent._imap.IMap | ||||
|         gevent.pool.IMapUnordered = gevent._imap.IMapUnordered | ||||
|         gevent.pool.Greenlet = gevent.greenlet.Greenlet | ||||
|     else: | ||||
|         # For gevent < 1.3, only patching of gevent.pool classes necessary | ||||
|         gevent.pool.IMap = imap_class | ||||
|         gevent.pool.IMapUnordered = imap_unordered_class | ||||
| 
 | ||||
|     gevent.pool.Group.greenlet_class = g_class | ||||
| 
 | ||||
|     # replace gevent shortcuts | ||||
|     gevent.Greenlet = gevent.greenlet.Greenlet | ||||
|     gevent.spawn = gevent.greenlet.Greenlet.spawn | ||||
|     gevent.spawn_later = gevent.greenlet.Greenlet.spawn_later | ||||
|  | @ -0,0 +1,55 @@ | |||
| import gevent | ||||
| 
 | ||||
| from ...context import Context | ||||
| from ...provider import BaseContextProvider | ||||
| 
 | ||||
| 
 | ||||
| # Greenlet attribute used to set/get the Context instance | ||||
| CONTEXT_ATTR = '__datadog_context' | ||||
| 
 | ||||
| 
 | ||||
| class GeventContextProvider(BaseContextProvider): | ||||
|     """ | ||||
|     Context provider that retrieves all contexts for the current asynchronous | ||||
|     execution. It must be used in asynchronous programming that relies | ||||
|     on the ``gevent`` library. Framework instrumentation that uses the | ||||
|     gevent WSGI server (or gevent in general) can use this provider. | ||||
|     """ | ||||
|     def _get_current_context(self): | ||||
|         """Helper to get the current context from the current greenlet""" | ||||
|         current_g = gevent.getcurrent() | ||||
|         if current_g is not None: | ||||
|             return getattr(current_g, CONTEXT_ATTR, None) | ||||
|         return None | ||||
| 
 | ||||
|     def _has_active_context(self): | ||||
|         """Helper to determine if we have a currently active context""" | ||||
|         return self._get_current_context() is not None | ||||
| 
 | ||||
|     def activate(self, context): | ||||
|         """Sets the scoped ``Context`` for the current running ``Greenlet``. | ||||
|         """ | ||||
|         current_g = gevent.getcurrent() | ||||
|         if current_g is not None: | ||||
|             setattr(current_g, CONTEXT_ATTR, context) | ||||
|             return context | ||||
| 
 | ||||
|     def active(self): | ||||
|         """ | ||||
|         Returns the scoped ``Context`` for this execution flow. The ``Context`` | ||||
|     uses the ``Greenlet`` class as a carrier, and every time a greenlet | ||||
|         is created it receives the "parent" context. | ||||
|         """ | ||||
|         ctx = self._get_current_context() | ||||
|         if ctx is not None: | ||||
|             # return the active Context for this greenlet (if any) | ||||
|             return ctx | ||||
| 
 | ||||
|         # the greenlet doesn't have a Context, so one is created and attached, | ||||
|         # even to the main greenlet. This is required in distributed tracing | ||||
|         # when a new arbitrary Context is provided. | ||||
|         current_g = gevent.getcurrent() | ||||
|         if current_g: | ||||
|             ctx = Context() | ||||
|             setattr(current_g, CONTEXT_ATTR, ctx) | ||||
|             return ctx | ||||
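| 
 | ||||
| 
 | ||||
| # A hedged sketch: the provider can be configured manually instead of calling | ||||
| # `patch(gevent=True)`; this mirrors what patch() in this package does. | ||||
| def _example_configure(): | ||||
|     import ddtrace | ||||
|     ddtrace.tracer.configure(context_provider=GeventContextProvider()) | ||||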
|  | @ -0,0 +1,57 @@ | |||
| """ | ||||
| The gRPC integration traces the client and server using the interceptor pattern. | ||||
| 
 | ||||
| gRPC will be automatically instrumented with ``patch_all``, or when using | ||||
| the ``ddtrace-run`` command. | ||||
| gRPC is instrumented on import. To instrument gRPC manually, use the | ||||
| ``patch`` function:: | ||||
| 
 | ||||
|     import grpc | ||||
|     from ddtrace import patch | ||||
|     patch(grpc=True) | ||||
| 
 | ||||
|     # use grpc like usual | ||||
| 
 | ||||
| To configure the gRPC integration on a per-channel basis, use the | ||||
| ``Pin`` API:: | ||||
| 
 | ||||
|     import grpc | ||||
|     from ddtrace import Pin, patch, Tracer | ||||
| 
 | ||||
|     patch(grpc=True) | ||||
|     custom_tracer = Tracer() | ||||
| 
 | ||||
|     # override the pin on the client | ||||
|     Pin.override(grpc.Channel, service='mygrpc', tracer=custom_tracer) | ||||
|     with grpc.insecure_channel('localhost:50051') as channel: | ||||
|         # create stubs and send requests | ||||
|         pass | ||||
| 
 | ||||
| To configure the gRPC integration on the server, use the ``Pin`` API:: | ||||
| 
 | ||||
|     import grpc | ||||
|     from grpc.framework.foundation import logging_pool | ||||
| 
 | ||||
|     from ddtrace import Pin, patch, Tracer | ||||
| 
 | ||||
|     patch(grpc=True) | ||||
|     custom_tracer = Tracer() | ||||
| 
 | ||||
|     # override the pin on the server | ||||
|     Pin.override(grpc.Server, service='mygrpc', tracer=custom_tracer) | ||||
|     server = grpc.server(logging_pool.pool(2)) | ||||
|     server.add_insecure_port('localhost:50051') | ||||
|     add_MyServicer_to_server(MyServicer(), server) | ||||
|     server.start() | ||||
| """ | ||||
| 
 | ||||
| 
 | ||||
| from ...utils.importlib import require_modules | ||||
| 
 | ||||
| required_modules = ['grpc'] | ||||
| 
 | ||||
| with require_modules(required_modules) as missing_modules: | ||||
|     if not missing_modules: | ||||
|         from .patch import patch, unpatch | ||||
| 
 | ||||
|         __all__ = ['patch', 'unpatch'] | ||||
|  | @ -0,0 +1,239 @@ | |||
| import collections | ||||
| import grpc | ||||
| from ddtrace.vendor import wrapt | ||||
| 
 | ||||
| from ddtrace import config | ||||
| from ddtrace.compat import to_unicode | ||||
| from ddtrace.ext import SpanTypes, errors | ||||
| from ...internal.logger import get_logger | ||||
| from ...propagation.http import HTTPPropagator | ||||
| from ...constants import ANALYTICS_SAMPLE_RATE_KEY | ||||
| from . import constants | ||||
| from .utils import parse_method_path | ||||
| 
 | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| # DEV: Follows Python interceptors RFC laid out in | ||||
| # https://github.com/grpc/proposal/blob/master/L13-python-interceptors.md | ||||
| 
 | ||||
| # DEV: __version__ added in v1.21.4 | ||||
| # https://github.com/grpc/grpc/commit/dd4830eae80143f5b0a9a3a1a024af4cf60e7d02 | ||||
| 
 | ||||
| 
 | ||||
| def create_client_interceptor(pin, host, port): | ||||
|     return _ClientInterceptor(pin, host, port) | ||||
| 
 | ||||
| 
 | ||||
| def intercept_channel(wrapped, instance, args, kwargs): | ||||
|     channel = args[0] | ||||
|     interceptors = args[1:] | ||||
|     if isinstance(getattr(channel, "_interceptor", None), _ClientInterceptor): | ||||
|         dd_interceptor = channel._interceptor | ||||
|         base_channel = getattr(channel, "_channel", None) | ||||
|         if base_channel: | ||||
|             new_channel = wrapped(channel._channel, *interceptors) | ||||
|             return grpc.intercept_channel(new_channel, dd_interceptor) | ||||
| 
 | ||||
|     return wrapped(*args, **kwargs) | ||||
| 
 | ||||
| 
 | ||||
| class _ClientCallDetails( | ||||
|     collections.namedtuple("_ClientCallDetails", ("method", "timeout", "metadata", "credentials")), | ||||
|     grpc.ClientCallDetails, | ||||
| ): | ||||
|     pass | ||||
| 
 | ||||
| 
 | ||||
| def _future_done_callback(span): | ||||
|     def func(response): | ||||
|         try: | ||||
|             # pull out response code from gRPC response to use both for `grpc.status.code` | ||||
|             # tag and the error type tag if the response is an exception | ||||
|             response_code = response.code() | ||||
|             # cast code to unicode for tags | ||||
|             status_code = to_unicode(response_code) | ||||
|             span.set_tag(constants.GRPC_STATUS_CODE_KEY, status_code) | ||||
| 
 | ||||
|             if response_code != grpc.StatusCode.OK: | ||||
|                 _handle_error(span, response, status_code) | ||||
|         finally: | ||||
|             span.finish() | ||||
| 
 | ||||
|     return func | ||||
| 
 | ||||
| 
 | ||||
| def _handle_response(span, response): | ||||
|     if isinstance(response, grpc.Future): | ||||
|         response.add_done_callback(_future_done_callback(span)) | ||||
| 
 | ||||
| 
 | ||||
| def _handle_error(span, response_error, status_code): | ||||
|     # response_error should be a grpc.Future and so we expect to have cancelled(), | ||||
|     # exception() and traceback() methods if a computation has resulted in an | ||||
|     # exception being raised | ||||
|     if ( | ||||
|         not callable(getattr(response_error, "cancelled", None)) | ||||
|         and not callable(getattr(response_error, "exception", None)) | ||||
|         and not callable(getattr(response_error, "traceback", None)) | ||||
|     ): | ||||
|         return | ||||
| 
 | ||||
|     if response_error.cancelled(): | ||||
|         # handle cancelled futures separately to avoid raising grpc.FutureCancelledError | ||||
|         span.error = 1 | ||||
|         exc_val = to_unicode(response_error.details()) | ||||
|         span.set_tag(errors.ERROR_MSG, exc_val) | ||||
|         span.set_tag(errors.ERROR_TYPE, status_code) | ||||
|         return | ||||
| 
 | ||||
|     exception = response_error.exception() | ||||
|     traceback = response_error.traceback() | ||||
| 
 | ||||
|     if exception is not None and traceback is not None: | ||||
|         span.error = 1 | ||||
|         if isinstance(exception, grpc.RpcError): | ||||
|             # handle internal gRPC exceptions separately to get status code and | ||||
|             # details as tags properly | ||||
|             exc_val = to_unicode(response_error.details()) | ||||
|             span.set_tag(errors.ERROR_MSG, exc_val) | ||||
|             span.set_tag(errors.ERROR_TYPE, status_code) | ||||
|             span.set_tag(errors.ERROR_STACK, traceback) | ||||
|         else: | ||||
|             exc_type = type(exception) | ||||
|             span.set_exc_info(exc_type, exception, traceback) | ||||
|             status_code = to_unicode(response_error.code()) | ||||
| 
 | ||||
| 
 | ||||
| class _WrappedResponseCallFuture(wrapt.ObjectProxy): | ||||
|     def __init__(self, wrapped, span): | ||||
|         super(_WrappedResponseCallFuture, self).__init__(wrapped) | ||||
|         self._span = span | ||||
| 
 | ||||
|     def __iter__(self): | ||||
|         return self | ||||
| 
 | ||||
|     def __next__(self): | ||||
|         try: | ||||
|             return next(self.__wrapped__) | ||||
|         except StopIteration: | ||||
|             # at end of iteration handle response status from wrapped future | ||||
|             _handle_response(self._span, self.__wrapped__) | ||||
|             raise | ||||
|         except grpc.RpcError as rpc_error: | ||||
|             # DEV: grpcio<1.18.0 grpc.RpcError is raised rather than returned as response | ||||
|             # https://github.com/grpc/grpc/commit/8199aff7a66460fbc4e9a82ade2e95ef076fd8f9 | ||||
|             # handle as a response | ||||
|             _handle_response(self._span, rpc_error) | ||||
|             raise | ||||
|         except Exception: | ||||
|             # DEV: added for safety, though this should not be reached since the wrapped response raises grpc.RpcError | ||||
|             log.debug("unexpected non-grpc exception raised, closing open span", exc_info=True) | ||||
|             self._span.set_traceback() | ||||
|             self._span.finish() | ||||
|             raise | ||||
| 
 | ||||
|     def next(self): | ||||
|         return self.__next__() | ||||
| 
 | ||||
| 
 | ||||
| class _ClientInterceptor( | ||||
|     grpc.UnaryUnaryClientInterceptor, | ||||
|     grpc.UnaryStreamClientInterceptor, | ||||
|     grpc.StreamUnaryClientInterceptor, | ||||
|     grpc.StreamStreamClientInterceptor, | ||||
| ): | ||||
|     def __init__(self, pin, host, port): | ||||
|         self._pin = pin | ||||
|         self._host = host | ||||
|         self._port = port | ||||
| 
 | ||||
|     def _intercept_client_call(self, method_kind, client_call_details): | ||||
|         tracer = self._pin.tracer | ||||
| 
 | ||||
|         span = tracer.trace( | ||||
|             "grpc", span_type=SpanTypes.GRPC, service=self._pin.service, resource=client_call_details.method, | ||||
|         ) | ||||
| 
 | ||||
|         # tags for method details | ||||
|         method_path = client_call_details.method | ||||
|         method_package, method_service, method_name = parse_method_path(method_path) | ||||
|         span.set_tag(constants.GRPC_METHOD_PATH_KEY, method_path) | ||||
|         span.set_tag(constants.GRPC_METHOD_PACKAGE_KEY, method_package) | ||||
|         span.set_tag(constants.GRPC_METHOD_SERVICE_KEY, method_service) | ||||
|         span.set_tag(constants.GRPC_METHOD_NAME_KEY, method_name) | ||||
|         span.set_tag(constants.GRPC_METHOD_KIND_KEY, method_kind) | ||||
|         span.set_tag(constants.GRPC_HOST_KEY, self._host) | ||||
|         span.set_tag(constants.GRPC_PORT_KEY, self._port) | ||||
|         span.set_tag(constants.GRPC_SPAN_KIND_KEY, constants.GRPC_SPAN_KIND_VALUE_CLIENT) | ||||
| 
 | ||||
|         sample_rate = config.grpc.get_analytics_sample_rate() | ||||
|         if sample_rate is not None: | ||||
|             span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate) | ||||
| 
 | ||||
|         # inject tags from pin | ||||
|         if self._pin.tags: | ||||
|             span.set_tags(self._pin.tags) | ||||
| 
 | ||||
|         # propagate distributed tracing headers if available | ||||
|         headers = {} | ||||
|         if config.grpc.distributed_tracing_enabled: | ||||
|             propagator = HTTPPropagator() | ||||
|             propagator.inject(span.context, headers) | ||||
| 
 | ||||
|         metadata = [] | ||||
|         if client_call_details.metadata is not None: | ||||
|             metadata = list(client_call_details.metadata) | ||||
|         metadata.extend(headers.items()) | ||||
| 
 | ||||
|         client_call_details = _ClientCallDetails( | ||||
|             client_call_details.method, client_call_details.timeout, metadata, client_call_details.credentials, | ||||
|         ) | ||||
| 
 | ||||
|         return span, client_call_details | ||||
| 
 | ||||
|     def intercept_unary_unary(self, continuation, client_call_details, request): | ||||
|         span, client_call_details = self._intercept_client_call(constants.GRPC_METHOD_KIND_UNARY, client_call_details,) | ||||
|         try: | ||||
|             response = continuation(client_call_details, request) | ||||
|             _handle_response(span, response) | ||||
|         except grpc.RpcError as rpc_error: | ||||
|             # DEV: grpcio<1.18.0 grpc.RpcError is raised rather than returned as response | ||||
|             # https://github.com/grpc/grpc/commit/8199aff7a66460fbc4e9a82ade2e95ef076fd8f9 | ||||
|             # handle as a response | ||||
|             _handle_response(span, rpc_error) | ||||
|             raise | ||||
| 
 | ||||
|         return response | ||||
| 
 | ||||
|     def intercept_unary_stream(self, continuation, client_call_details, request): | ||||
|         span, client_call_details = self._intercept_client_call( | ||||
|             constants.GRPC_METHOD_KIND_SERVER_STREAMING, client_call_details, | ||||
|         ) | ||||
|         response_iterator = continuation(client_call_details, request) | ||||
|         response_iterator = _WrappedResponseCallFuture(response_iterator, span) | ||||
|         return response_iterator | ||||
| 
 | ||||
|     def intercept_stream_unary(self, continuation, client_call_details, request_iterator): | ||||
|         span, client_call_details = self._intercept_client_call( | ||||
|             constants.GRPC_METHOD_KIND_CLIENT_STREAMING, client_call_details, | ||||
|         ) | ||||
|         try: | ||||
|             response = continuation(client_call_details, request_iterator) | ||||
|             _handle_response(span, response) | ||||
|         except grpc.RpcError as rpc_error: | ||||
|             # DEV: grpcio<1.18.0 grpc.RpcError is raised rather than returned as response | ||||
|             # https://github.com/grpc/grpc/commit/8199aff7a66460fbc4e9a82ade2e95ef076fd8f9 | ||||
|             # handle as a response | ||||
|             _handle_response(span, rpc_error) | ||||
|             raise | ||||
| 
 | ||||
|         return response | ||||
| 
 | ||||
|     def intercept_stream_stream(self, continuation, client_call_details, request_iterator): | ||||
|         span, client_call_details = self._intercept_client_call( | ||||
|             constants.GRPC_METHOD_KIND_BIDI_STREAMING, client_call_details, | ||||
|         ) | ||||
|         response_iterator = continuation(client_call_details, request_iterator) | ||||
|         response_iterator = _WrappedResponseCallFuture(response_iterator, span) | ||||
|         return response_iterator | ||||
|  | @ -0,0 +1,24 @@ | |||
| import grpc | ||||
| 
 | ||||
| 
 | ||||
| GRPC_PIN_MODULE_SERVER = grpc.Server | ||||
| GRPC_PIN_MODULE_CLIENT = grpc.Channel | ||||
| GRPC_METHOD_PATH_KEY = 'grpc.method.path' | ||||
| GRPC_METHOD_PACKAGE_KEY = 'grpc.method.package' | ||||
| GRPC_METHOD_SERVICE_KEY = 'grpc.method.service' | ||||
| GRPC_METHOD_NAME_KEY = 'grpc.method.name' | ||||
| GRPC_METHOD_KIND_KEY = 'grpc.method.kind' | ||||
| GRPC_STATUS_CODE_KEY = 'grpc.status.code' | ||||
| GRPC_REQUEST_METADATA_PREFIX_KEY = 'grpc.request.metadata.' | ||||
| GRPC_RESPONSE_METADATA_PREFIX_KEY = 'grpc.response.metadata.' | ||||
| GRPC_HOST_KEY = 'grpc.host' | ||||
| GRPC_PORT_KEY = 'grpc.port' | ||||
| GRPC_SPAN_KIND_KEY = 'span.kind' | ||||
| GRPC_SPAN_KIND_VALUE_CLIENT = 'client' | ||||
| GRPC_SPAN_KIND_VALUE_SERVER = 'server' | ||||
| GRPC_METHOD_KIND_UNARY = 'unary' | ||||
| GRPC_METHOD_KIND_CLIENT_STREAMING = 'client_streaming' | ||||
| GRPC_METHOD_KIND_SERVER_STREAMING = 'server_streaming' | ||||
| GRPC_METHOD_KIND_BIDI_STREAMING = 'bidi_streaming' | ||||
| GRPC_SERVICE_SERVER = 'grpc-server' | ||||
| GRPC_SERVICE_CLIENT = 'grpc-client' | ||||
|  | @ -0,0 +1,126 @@ | |||
| import grpc | ||||
| import os | ||||
| 
 | ||||
| from ddtrace.vendor.wrapt import wrap_function_wrapper as _w | ||||
| from ddtrace import config, Pin | ||||
| 
 | ||||
| from ...utils.wrappers import unwrap as _u | ||||
| 
 | ||||
| from . import constants | ||||
| from .client_interceptor import create_client_interceptor, intercept_channel | ||||
| from .server_interceptor import create_server_interceptor | ||||
| 
 | ||||
| 
 | ||||
| config._add('grpc_server', dict( | ||||
|     service_name=os.environ.get('DATADOG_SERVICE_NAME', constants.GRPC_SERVICE_SERVER), | ||||
|     distributed_tracing_enabled=True, | ||||
| )) | ||||
| 
 | ||||
| # TODO[tbutt]: keeping name for client config unchanged to maintain backwards | ||||
| # compatibility but should change in future | ||||
| config._add('grpc', dict( | ||||
|     service_name='{}-{}'.format( | ||||
|         os.environ.get('DATADOG_SERVICE_NAME'), constants.GRPC_SERVICE_CLIENT | ||||
|     ) if os.environ.get('DATADOG_SERVICE_NAME') else constants.GRPC_SERVICE_CLIENT, | ||||
|     distributed_tracing_enabled=True, | ||||
| )) | ||||
| 
 | ||||
| 
 | ||||
| def patch(): | ||||
|     _patch_client() | ||||
|     _patch_server() | ||||
| 
 | ||||
| 
 | ||||
| def unpatch(): | ||||
|     _unpatch_client() | ||||
|     _unpatch_server() | ||||
| 
 | ||||
| 
 | ||||
| def _patch_client(): | ||||
|     if getattr(constants.GRPC_PIN_MODULE_CLIENT, '__datadog_patch', False): | ||||
|         return | ||||
|     setattr(constants.GRPC_PIN_MODULE_CLIENT, '__datadog_patch', True) | ||||
| 
 | ||||
|     Pin(service=config.grpc.service_name).onto(constants.GRPC_PIN_MODULE_CLIENT) | ||||
| 
 | ||||
|     _w('grpc', 'insecure_channel', _client_channel_interceptor) | ||||
|     _w('grpc', 'secure_channel', _client_channel_interceptor) | ||||
|     _w('grpc', 'intercept_channel', intercept_channel) | ||||
| 
 | ||||
| 
 | ||||
| def _unpatch_client(): | ||||
|     if not getattr(constants.GRPC_PIN_MODULE_CLIENT, '__datadog_patch', False): | ||||
|         return | ||||
|     setattr(constants.GRPC_PIN_MODULE_CLIENT, '__datadog_patch', False) | ||||
| 
 | ||||
|     pin = Pin.get_from(constants.GRPC_PIN_MODULE_CLIENT) | ||||
|     if pin: | ||||
|         pin.remove_from(constants.GRPC_PIN_MODULE_CLIENT) | ||||
| 
 | ||||
|     _u(grpc, 'secure_channel') | ||||
|     _u(grpc, 'insecure_channel') | ||||
| 
 | ||||
| 
 | ||||
| def _patch_server(): | ||||
|     if getattr(constants.GRPC_PIN_MODULE_SERVER, '__datadog_patch', False): | ||||
|         return | ||||
|     setattr(constants.GRPC_PIN_MODULE_SERVER, '__datadog_patch', True) | ||||
| 
 | ||||
|     Pin(service=config.grpc_server.service_name).onto(constants.GRPC_PIN_MODULE_SERVER) | ||||
| 
 | ||||
|     _w('grpc', 'server', _server_constructor_interceptor) | ||||
| 
 | ||||
| 
 | ||||
| def _unpatch_server(): | ||||
|     if not getattr(constants.GRPC_PIN_MODULE_SERVER, '__datadog_patch', False): | ||||
|         return | ||||
|     setattr(constants.GRPC_PIN_MODULE_SERVER, '__datadog_patch', False) | ||||
| 
 | ||||
|     pin = Pin.get_from(constants.GRPC_PIN_MODULE_SERVER) | ||||
|     if pin: | ||||
|         pin.remove_from(constants.GRPC_PIN_MODULE_SERVER) | ||||
| 
 | ||||
|     _u(grpc, 'server') | ||||
| 
 | ||||
| 
 | ||||
| def _client_channel_interceptor(wrapped, instance, args, kwargs): | ||||
|     channel = wrapped(*args, **kwargs) | ||||
| 
 | ||||
|     pin = Pin.get_from(constants.GRPC_PIN_MODULE_CLIENT) | ||||
|     if not pin or not pin.enabled(): | ||||
|         return channel | ||||
| 
 | ||||
|     (host, port) = _parse_target_from_arguments(args, kwargs) | ||||
| 
 | ||||
|     interceptor_function = create_client_interceptor(pin, host, port) | ||||
|     return grpc.intercept_channel(channel, interceptor_function) | ||||
| 
 | ||||
| 
 | ||||
| def _server_constructor_interceptor(wrapped, instance, args, kwargs): | ||||
|     # DEV: use the pin configured on the grpc server module to build the | ||||
|     # tracing interceptor for this server | ||||
| 
 | ||||
|     pin = Pin.get_from(constants.GRPC_PIN_MODULE_SERVER) | ||||
|     if not pin or not pin.enabled(): | ||||
|         return wrapped(*args, **kwargs) | ||||
| 
 | ||||
|     interceptor = create_server_interceptor(pin) | ||||
| 
 | ||||
|     # DEV: Inject our tracing interceptor first in the list of interceptors | ||||
|     if 'interceptors' in kwargs: | ||||
|         kwargs['interceptors'] = (interceptor,) + tuple(kwargs['interceptors']) | ||||
|     else: | ||||
|         kwargs['interceptors'] = (interceptor,) | ||||
| 
 | ||||
|     return wrapped(*args, **kwargs) | ||||
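|  | ||||
| # For example (hypothetical user code, with patch() applied): a server created | ||||
| # with its own interceptors keeps them, running after the tracing interceptor: | ||||
| # | ||||
| #     from concurrent import futures | ||||
| #     server = grpc.server( | ||||
| #         futures.ThreadPoolExecutor(max_workers=4), | ||||
| #         interceptors=(my_interceptor,),  # my_interceptor is illustrative | ||||
| #     ) | ||||
| #     # effective interceptors: (tracing_interceptor, my_interceptor) | ||||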
| 
 | ||||
| 
 | ||||
| def _parse_target_from_arguments(args, kwargs): | ||||
|     if 'target' in kwargs: | ||||
|         target = kwargs['target'] | ||||
|     else: | ||||
|         target = args[0] | ||||
| 
 | ||||
|     split = target.rsplit(':', 2) | ||||
| 
 | ||||
|     return (split[0], split[1] if len(split) > 1 else None) | ||||
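|  | ||||
| # A quick illustration of the parsing above (hypothetical targets): | ||||
| #     'localhost:50051' -> ('localhost', '50051') | ||||
| #     'localhost'       -> ('localhost', None) | ||||
| # Note that bracketed IPv6 targets such as '[::1]:50051' split on the wrong | ||||
| # colons and yield ('[:', '1]') rather than ('[::1]', '50051'). | ||||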
|  | @ -0,0 +1,146 @@ | |||
| import grpc | ||||
| from ddtrace.vendor import wrapt | ||||
| 
 | ||||
| from ddtrace import config | ||||
| from ddtrace.ext import errors | ||||
| from ddtrace.compat import to_unicode | ||||
| 
 | ||||
| from ...constants import ANALYTICS_SAMPLE_RATE_KEY | ||||
| from ...ext import SpanTypes | ||||
| from ...propagation.http import HTTPPropagator | ||||
| from . import constants | ||||
| from .utils import parse_method_path | ||||
| 
 | ||||
| 
 | ||||
| def create_server_interceptor(pin): | ||||
|     def interceptor_function(continuation, handler_call_details): | ||||
|         if not pin.enabled(): | ||||
|             return continuation(handler_call_details) | ||||
| 
 | ||||
|         rpc_method_handler = continuation(handler_call_details) | ||||
|         return _TracedRpcMethodHandler(pin, handler_call_details, rpc_method_handler) | ||||
| 
 | ||||
|     return _ServerInterceptor(interceptor_function) | ||||
| 
 | ||||
| 
 | ||||
| def _handle_server_exception(server_context, span): | ||||
|     if server_context is not None and \ | ||||
|        hasattr(server_context, '_state') and \ | ||||
|        server_context._state is not None: | ||||
|         code = to_unicode(server_context._state.code) | ||||
|         details = to_unicode(server_context._state.details) | ||||
|         span.error = 1 | ||||
|         span.set_tag(errors.ERROR_MSG, details) | ||||
|         span.set_tag(errors.ERROR_TYPE, code) | ||||
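|  | ||||
| # For example, a servicer method that fails populates the state read above | ||||
| # (a sketch; the handler body is hypothetical): | ||||
| # | ||||
| #     context.set_code(grpc.StatusCode.INVALID_ARGUMENT) | ||||
| #     context.set_details('bad request payload') | ||||
| # | ||||
| # which surfaces on the span as error.msg='bad request payload' and | ||||
| # error.type='StatusCode.INVALID_ARGUMENT'. | ||||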
| 
 | ||||
| 
 | ||||
| def _wrap_response_iterator(response_iterator, server_context, span): | ||||
|     try: | ||||
|         for response in response_iterator: | ||||
|             yield response | ||||
|     except Exception: | ||||
|         span.set_traceback() | ||||
|         _handle_server_exception(server_context, span) | ||||
|         raise | ||||
|     finally: | ||||
|         span.finish() | ||||
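|  | ||||
| # Because the span is finished in the generator's finally clause, a streaming | ||||
| # server span stays open until the client drains or abandons the iterator, | ||||
| # e.g. (hypothetical client code; stub and StreamReplies are illustrative): | ||||
| # | ||||
| #     for reply in stub.StreamReplies(request): | ||||
| #         handle(reply)  # the server span finishes when iteration ends | ||||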
| 
 | ||||
| 
 | ||||
| class _TracedRpcMethodHandler(wrapt.ObjectProxy): | ||||
|     def __init__(self, pin, handler_call_details, wrapped): | ||||
|         super(_TracedRpcMethodHandler, self).__init__(wrapped) | ||||
|         self._pin = pin | ||||
|         self._handler_call_details = handler_call_details | ||||
| 
 | ||||
|     def _fn(self, method_kind, behavior, args, kwargs): | ||||
|         if config.grpc_server.distributed_tracing_enabled: | ||||
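|             # the invocation metadata carries the propagation headers injected | ||||
|             # by the client (x-datadog-trace-id, x-datadog-parent-id, | ||||
|             # x-datadog-sampling-priority); extracting them reparents this | ||||
|             # server span under the caller's trace | ||||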
|             headers = dict(self._handler_call_details.invocation_metadata) | ||||
|             propagator = HTTPPropagator() | ||||
|             context = propagator.extract(headers) | ||||
| 
 | ||||
|             if context.trace_id: | ||||
|                 self._pin.tracer.context_provider.activate(context) | ||||
| 
 | ||||
|         tracer = self._pin.tracer | ||||
| 
 | ||||
|         span = tracer.trace( | ||||
|             'grpc', | ||||
|             span_type=SpanTypes.GRPC, | ||||
|             service=self._pin.service, | ||||
|             resource=self._handler_call_details.method, | ||||
|         ) | ||||
| 
 | ||||
|         method_path = self._handler_call_details.method | ||||
|         method_package, method_service, method_name = parse_method_path(method_path) | ||||
|         span.set_tag(constants.GRPC_METHOD_PATH_KEY, method_path) | ||||
|         span.set_tag(constants.GRPC_METHOD_PACKAGE_KEY, method_package) | ||||
|         span.set_tag(constants.GRPC_METHOD_SERVICE_KEY, method_service) | ||||
|         span.set_tag(constants.GRPC_METHOD_NAME_KEY, method_name) | ||||
|         span.set_tag(constants.GRPC_METHOD_KIND_KEY, method_kind) | ||||
|         span.set_tag(constants.GRPC_SPAN_KIND_KEY, constants.GRPC_SPAN_KIND_VALUE_SERVER) | ||||
| 
 | ||||
|         sample_rate = config.grpc_server.get_analytics_sample_rate() | ||||
|         if sample_rate is not None: | ||||
|             span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate) | ||||
| 
 | ||||
|         # the server context is conventionally passed as the second positional | ||||
|         # argument; when it is absent, skip tagging the span with server state | ||||
|         # information | ||||
|         server_context = None | ||||
|         if len(args) > 1 and isinstance(args[1], grpc.ServicerContext): | ||||
|             server_context = args[1] | ||||
| 
 | ||||
|         if self._pin.tags: | ||||
|             span.set_tags(self._pin.tags) | ||||
| 
 | ||||
|         try: | ||||
|             response_or_iterator = behavior(*args, **kwargs) | ||||
| 
 | ||||
|             if self.__wrapped__.response_streaming: | ||||
|                 response_or_iterator = _wrap_response_iterator(response_or_iterator, server_context, span) | ||||
|         except Exception: | ||||
|             span.set_traceback() | ||||
|             _handle_server_exception(server_context, span) | ||||
|             raise | ||||
|         finally: | ||||
|             if not self.__wrapped__.response_streaming: | ||||
|                 span.finish() | ||||
| 
 | ||||
|         return response_or_iterator | ||||
| 
 | ||||
|     def unary_unary(self, *args, **kwargs): | ||||
|         return self._fn( | ||||
|             constants.GRPC_METHOD_KIND_UNARY, | ||||
|             self.__wrapped__.unary_unary, | ||||
|             args, | ||||
|             kwargs | ||||
|         ) | ||||
| 
 | ||||
|     def unary_stream(self, *args, **kwargs): | ||||
|         return self._fn( | ||||
|             constants.GRPC_METHOD_KIND_SERVER_STREAMING, | ||||
|             self.__wrapped__.unary_stream, | ||||
|             args, | ||||
|             kwargs | ||||
|         ) | ||||
| 
 | ||||
|     def stream_unary(self, *args, **kwargs): | ||||
|         return self._fn( | ||||
|             constants.GRPC_METHOD_KIND_CLIENT_STREAMING, | ||||
|             self.__wrapped__.stream_unary, | ||||
|             args, | ||||
|             kwargs | ||||
|         ) | ||||
| 
 | ||||
|     def stream_stream(self, *args, **kwargs): | ||||
|         return self._fn( | ||||
|             constants.GRPC_METHOD_KIND_BIDI_STREAMING, | ||||
|             self.__wrapped__.stream_stream, | ||||
|             args, | ||||
|             kwargs | ||||
|         ) | ||||
| 
 | ||||
| 
 | ||||
| class _ServerInterceptor(grpc.ServerInterceptor): | ||||
|     def __init__(self, interceptor_function): | ||||
|         self._fn = interceptor_function | ||||
| 
 | ||||
|     def intercept_service(self, continuation, handler_call_details): | ||||
|         return self._fn(continuation, handler_call_details) | ||||
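|  | ||||
| # If automatic patching is not used, the interceptor could in principle be | ||||
| # attached by hand (a sketch against this module's internal API; my_pin is | ||||
| # illustrative): | ||||
| # | ||||
| #     from concurrent import futures | ||||
| #     import grpc | ||||
| #     from ddtrace import Pin | ||||
| # | ||||
| #     my_pin = Pin(service='grpc-server') | ||||
| #     server = grpc.server( | ||||
| #         futures.ThreadPoolExecutor(max_workers=4), | ||||
| #         interceptors=(create_server_interceptor(my_pin),), | ||||
| #     ) | ||||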