From ca8bd543a6d14771f5b82a259483138ad370d767 Mon Sep 17 00:00:00 2001
From: Chih-Yu Yeh
Date: Wed, 24 Jul 2024 10:26:48 +0800
Subject: [PATCH] chore(wren-ai-service): update indexing async components and
 update tests to async version (#549)

* update
* fix conflict
* fix conflict
* update
* fix conflict
* remove unused code
* fix bug
* fix conflict
* fix conflict
* fix demo ui
* update
* add sql regenerations api boilerplate
* fix conflicts
* update
* fix conflicts
* fix conflicts
* fix conflicts
* fix conflicts
* update sql explanation api and pipeline
* update
* update sql explanation api
* refine sql explanation pipeline
* fix pipeline
* fix conflict
* fix sql formatting
* resolve conflict
* fix bug
* resolve conflict
* fix conflict
* rebase
* make sql_regeneration async
* fix broken import
* fix async await
* use logger.exception instead of logger.error
* fix bugs
* simplify pipeline
* refine prompt
* update sql_explanation api by allowing passing multiple steps of sqls
* remove redundant code
* fix bug
* update ui
* update
* update ui
* update
* fix conflict
* orjson dump and formatting for debug messages
* fix tests
* fix conflict
* fix bugs
* fix bug
* fix conflict
* update
* fix conflict
* update sql explanation results
* fix groupByKeys bug
* update
* update
* update groupByKeys
* update engine configs
* add OTHERS error code
* refine ui: use sidebar
* fix conflict
* fix conflict
* fix bug
* fix imports
* allow users to choose openai llm
* update
* update prompt
* fix bug
* fix tests
* fix bug
* fix conflicts
* update prompt and fix bugs
* update
* fix bug
* fix
* fix engine as wren_ui
* remove unused dataset
* fix sql explanation
* fix groupByKey id
* update
* change EngineConfig location and update .env.dev.example
* give defaults to EngineConfig
* update
* fix
* add async
* add async document writer
* add pytest-asyncio and update tests
* update
* revert
* fix
* fix
---
 wren-ai-service/poetry.lock                    |  20 +++-
 wren-ai-service/pyproject.toml                 |   1 +
 .../src/pipelines/indexing/indexing.py         |  53 +++++---
 .../src/providers/document_store/qdrant.py     |  75 +++++++++++-
 .../tests/pytest/pipelines/test_ask.py         | 113 ++++++++----------
 .../pytest/pipelines/test_ask_details.py       |  12 +-
 .../pytest/pipelines/test_document_cleaner.py  |  37 +++---
 .../tests/pytest/services/test_ask.py          |  16 ++-
 .../tests/pytest/services/test_ask_details.py  |  11 +-
 9 files changed, 223 insertions(+), 115 deletions(-)

diff --git a/wren-ai-service/poetry.lock b/wren-ai-service/poetry.lock
index 31b33c696..f35772699 100644
--- a/wren-ai-service/poetry.lock
+++ b/wren-ai-service/poetry.lock
@@ -3407,6 +3407,24 @@ pluggy = ">=1.5,<2.0"
 
 [package.extras]
 dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
 
+[[package]]
+name = "pytest-asyncio"
+version = "0.23.8"
+description = "Pytest support for asyncio"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"},
+    {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"},
+]
+
+[package.dependencies]
+pytest = ">=7.0.0,<9"
+
+[package.extras]
+docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
+testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]
+
 [[package]]
 name = "pytest-cov"
 version = "4.1.0"
@@ -5284,4 +5302,4 @@ testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
 
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.12.*, <4.0"
-content-hash = "1eb1222e280274973a23dc37267c3040fab1ccf1d05a166e6a55d3c649b7b9ac"
+content-hash = "7883794785e5c178681df1d5d3cb7495b9efcd702f8d85555026634468489c41"
diff --git a/wren-ai-service/pyproject.toml b/wren-ai-service/pyproject.toml
index bee03d455..277742f0d 100644
--- a/wren-ai-service/pyproject.toml
+++ b/wren-ai-service/pyproject.toml
@@ -39,6 +39,7 @@ ragas-haystack = "==0.1.3"
 psycopg2-binary = "==2.9.9"
 setuptools = "==70.0.0"
 locust = "==2.28.0"
+pytest-asyncio = "==0.23.8"
 
 [tool.poetry.group.eval.dependencies]
 tomlkit = "==0.13.0"
diff --git a/wren-ai-service/src/pipelines/indexing/indexing.py b/wren-ai-service/src/pipelines/indexing/indexing.py
index 920b158b7..46c64c335 100644
--- a/wren-ai-service/src/pipelines/indexing/indexing.py
+++ b/wren-ai-service/src/pipelines/indexing/indexing.py
@@ -1,9 +1,10 @@
+import asyncio
 import json
 import logging
 import os
 import sys
 from pathlib import Path
-from typing import Any, Dict, List
+from typing import Any, Dict, List, Optional
 
 import orjson
 from hamilton import base
@@ -35,14 +36,15 @@ def __init__(self, stores: List[DocumentStore]) -> None:
         self._stores = stores
 
     @component.output_types(mdl=str)
-    def run(self, mdl: str) -> str:
-        def _clear_documents(store: DocumentStore) -> None:
-            ids = [str(i) for i in range(store.count_documents())]
+    async def run(self, mdl: str) -> str:
+        async def _clear_documents(store: DocumentStore) -> None:
+            document_count = await store.count_documents()
+            ids = [str(i) for i in range(document_count)]
             if ids:
-                store.delete_documents(ids)
+                await store.delete_documents(ids)
 
         logger.info("Ask Indexing pipeline is clearing old documents...")
-        [_clear_documents(store) for store in self._stores]
+        await asyncio.gather(*[_clear_documents(store) for store in self._stores])
         return {"mdl": mdl}
 
 
@@ -315,12 +317,29 @@ def _convert_metrics(self, metrics: List[Dict[str, Any]]) -> List[str]:
 
         return ddl_commands
 
 
+@component
+class AsyncDocumentWriter(DocumentWriter):
+    @component.output_types(documents_written=int)
+    async def run(
+        self, documents: List[Document], policy: Optional[DuplicatePolicy] = None
+    ):
+        if policy is None:
+            policy = self.policy
+
+        documents_written = await self.document_store.write_documents(
+            documents=documents, policy=policy
+        )
+        return {"documents_written": documents_written}
+
+
 ## Start of Pipeline
-@timer
+@async_timer
 @observe(capture_input=False, capture_output=False)
-def clean_document_store(mdl_str: str, cleaner: DocumentCleaner) -> Dict[str, Any]:
+async def clean_document_store(
+    mdl_str: str, cleaner: DocumentCleaner
+) -> Dict[str, Any]:
     logger.debug(f"input in clean_document_store: {mdl_str}")
-    return cleaner.run(mdl=mdl_str)
+    return await cleaner.run(mdl=mdl_str)
 
 
 @timer
@@ -357,10 +376,10 @@ async def embed_ddl(
     return await ddl_embedder.run(documents=convert_to_ddl["documents"])
 
 
-@timer
+@async_timer
 @observe(capture_input=False)
-def write_ddl(embed_ddl: Dict[str, Any], ddl_writer: DocumentWriter) -> None:
-    return ddl_writer.run(documents=embed_ddl["documents"])
+async def write_ddl(embed_ddl: Dict[str, Any], ddl_writer: DocumentWriter) -> None:
+    return await ddl_writer.run(documents=embed_ddl["documents"])
 
 
 @timer
@@ -385,10 +404,10 @@ async def embed_view(
     return await view_embedder.run(documents=convert_to_view["documents"])
 
 
-@timer
+@async_timer
 @observe(capture_input=False)
-def write_view(embed_view: Dict[str, Any], view_writer: DocumentWriter) -> None:
-    return view_writer.run(documents=embed_view["documents"])
+async def write_view(embed_view: Dict[str, Any], view_writer: DocumentWriter) -> None:
+    return await view_writer.run(documents=embed_view["documents"])
 
 
 ## End of Pipeline
@@ -408,13 +427,13 @@ def __init__(
         self.ddl_converter = DDLConverter()
         self.ddl_embedder = embedder_provider.get_document_embedder()
-        self.ddl_writer = DocumentWriter(
+        self.ddl_writer = AsyncDocumentWriter(
            document_store=ddl_store,
            policy=DuplicatePolicy.OVERWRITE,
         )
         self.view_converter = ViewConverter()
         self.view_embedder = embedder_provider.get_document_embedder()
-        self.view_writer = DocumentWriter(
+        self.view_writer = AsyncDocumentWriter(
            document_store=view_store,
            policy=DuplicatePolicy.OVERWRITE,
         )
diff --git a/wren-ai-service/src/providers/document_store/qdrant.py b/wren-ai-service/src/providers/document_store/qdrant.py
index e440e9b7d..64bf6ad69 100644
--- a/wren-ai-service/src/providers/document_store/qdrant.py
+++ b/wren-ai-service/src/providers/document_store/qdrant.py
@@ -5,17 +5,24 @@
 import numpy as np
 import qdrant_client
 from haystack import Document, component
+from haystack.document_stores.types import DuplicatePolicy
 from haystack.utils import Secret
 from haystack_integrations.components.retrievers.qdrant import QdrantEmbeddingRetriever
-from haystack_integrations.document_stores.qdrant import QdrantDocumentStore
+from haystack_integrations.document_stores.qdrant import (
+    QdrantDocumentStore,
+    document_store,
+)
 from haystack_integrations.document_stores.qdrant.converters import (
     DENSE_VECTORS_NAME,
+    convert_haystack_documents_to_qdrant_points,
+    convert_id,
     convert_qdrant_point_to_haystack_document,
 )
 from haystack_integrations.document_stores.qdrant.filters import (
     convert_filters_to_qdrant,
 )
 from qdrant_client.http import models as rest
+from tqdm import tqdm
 
 from src.core.provider import DocumentStoreProvider
 from src.providers.loader import get_default_embedding_model_dim, provider
@@ -156,6 +163,72 @@ async def _query_by_embedding(
             document.score = score
         return results
 
+    async def delete_documents(self, ids: List[str]):
+        ids = [convert_id(_id) for _id in ids]
+        try:
+            await self.async_client.delete(
+                collection_name=self.index,
+                points_selector=ids,
+                wait=self.wait_result_from_api,
+            )
+        except KeyError:
+            logger.warning(
+                "Called QdrantDocumentStore.delete_documents() on a non-existing ID",
+            )
+
+    async def count_documents(self) -> int:
+        return (await self.async_client.count(collection_name=self.index)).count
+
+    async def write_documents(
+        self, documents: List[Document], policy: DuplicatePolicy = DuplicatePolicy.FAIL
+    ):
+        for doc in documents:
+            if not isinstance(doc, Document):
+                msg = f"DocumentStore.write_documents() expects a list of Documents but got an element of {type(doc)}."
+                raise ValueError(msg)
+
+        self._set_up_collection(
+            self.index,
+            self.embedding_dim,
+            False,
+            self.similarity,
+            self.use_sparse_embeddings,
+        )
+
+        if len(documents) == 0:
+            logger.warning(
+                "Calling QdrantDocumentStore.write_documents() with empty list"
+            )
+            return
+
+        document_objects = self._handle_duplicate_documents(
+            documents=documents,
+            index=self.index,
+            policy=policy,
+        )
+
+        batched_documents = document_store.get_batches_from_generator(
+            document_objects, self.write_batch_size
+        )
+        with tqdm(
+            total=len(document_objects), disable=not self.progress_bar
+        ) as progress_bar:
+            for document_batch in batched_documents:
+                batch = convert_haystack_documents_to_qdrant_points(
+                    document_batch,
+                    embedding_field=self.embedding_field,
+                    use_sparse_embeddings=self.use_sparse_embeddings,
+                )
+
+                await self.async_client.upsert(
+                    collection_name=self.index,
+                    points=batch,
+                    wait=self.wait_result_from_api,
+                )
+
+                progress_bar.update(self.write_batch_size)
+        return len(document_objects)
+
 
 class AsyncQdrantEmbeddingRetriever(QdrantEmbeddingRetriever):
     def __init__(
diff --git a/wren-ai-service/tests/pytest/pipelines/test_ask.py b/wren-ai-service/tests/pytest/pipelines/test_ask.py
index e49f06bd6..8c520f908 100644
--- a/wren-ai-service/tests/pytest/pipelines/test_ask.py
+++ b/wren-ai-service/tests/pytest/pipelines/test_ask.py
@@ -4,7 +4,6 @@
 import pytest
 
 from src.core.engine import EngineConfig
-from src.core.pipeline import async_validate
 from src.core.provider import DocumentStoreProvider, EmbedderProvider
 from src.pipelines.ask.followup_generation import FollowUpGeneration
 from src.pipelines.ask.generation import Generation
@@ -46,7 +45,8 @@ def document_store_provider():
     return document_store_provider
 
 
-def test_clear_documents(mdl_str: str):
+@pytest.mark.asyncio
+async def test_clear_documents(mdl_str: str):
     _, embedder_provider, document_store_provider, _ = init_providers(EngineConfig())
     store = document_store_provider.get_store()
 
@@ -55,13 +55,12 @@ def test_clear_documents(mdl_str: str):
         document_store_provider=document_store_provider,
     )
 
-    async_validate(lambda: indexing_pipeline.run(mdl_str))
+    await indexing_pipeline.run(mdl_str)
 
-    assert store.count_documents() == 3
+    assert await store.count_documents() == 3
 
-    async_validate(
-        lambda: indexing_pipeline.run(
-            """
+    await indexing_pipeline.run(
+        """
 {
     "models": [],
     "relationships": [],
     "metrics": [],
     "views": [
         {
             "name": "book",
             "statement": "SELECT * FROM book",
             "properties": {
                 "question": "How many books are there?",
                 "description": "Retrieve the number of books"
             }
         }
     ]
 }
 """
-        )
     )
 
-    assert store.count_documents() == 1
+    assert await store.count_documents() == 1
 
 
-def test_indexing_pipeline(
+@pytest.mark.asyncio
+async def test_indexing_pipeline(
     mdl_str: str,
     embedder_provider: EmbedderProvider,
     document_store_provider: DocumentStoreProvider,
@@ -94,18 +93,19 @@ def test_indexing_pipeline(
         document_store_provider=document_store_provider,
     )
 
-    async_validate(lambda: indexing_pipeline.run(mdl_str))
+    await indexing_pipeline.run(mdl_str)
 
-    assert document_store_provider.get_store().count_documents() == 3
+    assert await document_store_provider.get_store().count_documents() == 3
     assert (
-        document_store_provider.get_store(
+        await document_store_provider.get_store(
             dataset_name="view_questions",
         ).count_documents()
         == 1
     )
 
 
-def test_retrieval_pipeline(
+@pytest.mark.asyncio
+async def test_retrieval_pipeline(
     embedder_provider: EmbedderProvider,
     document_store_provider: DocumentStoreProvider,
 ):
@@ -114,10 +114,8 @@ def test_retrieval_pipeline(
         document_store_provider=document_store_provider,
     )
 
-    retrieval_result = async_validate(
-        lambda: retrieval_pipeline.run(
-            "How many books are there?",
-        )
+    retrieval_result = await retrieval_pipeline.run(
+        "How many books are there?",
     )
 
     assert retrieval_result is not None
@@ -126,52 +124,48 @@ def test_retrieval_pipeline(
     GLOBAL_DATA["contexts"] = retrieval_result["retrieval"]["documents"]
 
 
-def test_generation_pipeline():
+@pytest.mark.asyncio
+async def test_generation_pipeline():
     llm_provider, _, _, engine = init_providers(EngineConfig())
     generation_pipeline = Generation(llm_provider=llm_provider, engine=engine)
-    generation_result = async_validate(
-        lambda: generation_pipeline.run(
-            "How many authors are there?",
-            contexts=GLOBAL_DATA["contexts"],
-            exclude=[],
-        )
+    generation_result = await generation_pipeline.run(
+        "How many authors are there?",
+        contexts=GLOBAL_DATA["contexts"],
+        exclude=[],
     )
 
     # todo: we'll refactor almost all test case with a mock server, thus temporarily only assert it is not None.
     assert generation_result["post_process"]["valid_generation_results"] is not None
     assert generation_result["post_process"]["invalid_generation_results"] is not None
 
-    generation_result = async_validate(
-        lambda: generation_pipeline.run(
-            "How many authors are there?",
-            contexts=GLOBAL_DATA["contexts"],
-            exclude=[{"statement": "SELECT 1 FROM author"}],
-        )
+    generation_result = await generation_pipeline.run(
+        "How many authors are there?",
+        contexts=GLOBAL_DATA["contexts"],
+        exclude=[{"statement": "SELECT 1 FROM author"}],
     )
 
     assert generation_result["post_process"]["valid_generation_results"] is not None
     assert generation_result["post_process"]["invalid_generation_results"] is not None
 
 
-def test_followup_generation_pipeline():
+@pytest.mark.asyncio
+async def test_followup_generation_pipeline():
     llm_provider, _, _, engine = init_providers(EngineConfig())
     generation_pipeline = FollowUpGeneration(llm_provider=llm_provider, engine=engine)
-    generation_result = async_validate(
-        lambda: generation_pipeline.run(
-            "What are names of the books?",
-            contexts=GLOBAL_DATA["contexts"],
-            history=AskRequest.AskResponseDetails(
-                sql="SELECT COUNT(*) FROM book",
-                summary="Retrieve the number of books",
-                steps=[
-                    SQLExplanation(
-                        sql="SELECT COUNT(*) FROM book",
-                        summary="Retrieve the number of books",
-                        cte_name="",
-                    )
-                ],
-            ),
-        )
+    generation_result = await generation_pipeline.run(
+        "What are names of the books?",
+        contexts=GLOBAL_DATA["contexts"],
+        history=AskRequest.AskResponseDetails(
+            sql="SELECT COUNT(*) FROM book",
+            summary="Retrieve the number of books",
+            steps=[
+                SQLExplanation(
+                    sql="SELECT COUNT(*) FROM book",
+                    summary="Retrieve the number of books",
+                    cte_name="",
+                )
+            ],
+        ),
     )
 
     # todo: we'll refactor almost all test case with a mock server, thus temporarily only assert it is not None.
@@ -179,21 +173,20 @@ def test_followup_generation_pipeline():
     assert generation_result["post_process"]["invalid_generation_results"] is not None
 
 
-def test_sql_correction_pipeline():
+@pytest.mark.asyncio
+async def test_sql_correction_pipeline():
     llm_provider, _, _, engine = init_providers(EngineConfig())
     sql_correction_pipeline = SQLCorrection(llm_provider=llm_provider, engine=engine)
 
-    sql_correction_result = async_validate(
-        lambda: sql_correction_pipeline.run(
-            contexts=GLOBAL_DATA["contexts"],
-            invalid_generation_results=[
-                {
-                    "sql": "Select count(*) from books",
-                    "summary": "Retrieve the number of books",
-                    "error": 'ERROR: com.google.cloud.bigquery.BigQueryException: Table "books" must be qualified with a dataset (e.g. dataset.table).',
-                }
-            ],
-        )
+    sql_correction_result = await sql_correction_pipeline.run(
+        contexts=GLOBAL_DATA["contexts"],
+        invalid_generation_results=[
+            {
+                "sql": "Select count(*) from books",
+                "summary": "Retrieve the number of books",
+                "error": 'ERROR: com.google.cloud.bigquery.BigQueryException: Table "books" must be qualified with a dataset (e.g. dataset.table).',
+            }
+        ],
     )
 
     assert isinstance(
diff --git a/wren-ai-service/tests/pytest/pipelines/test_ask_details.py b/wren-ai-service/tests/pytest/pipelines/test_ask_details.py
index 1146c00b1..9a9f64ccf 100644
--- a/wren-ai-service/tests/pytest/pipelines/test_ask_details.py
+++ b/wren-ai-service/tests/pytest/pipelines/test_ask_details.py
@@ -1,10 +1,12 @@
+import pytest
+
 from src.core.engine import EngineConfig
-from src.core.pipeline import async_validate
 from src.pipelines.ask_details.generation import Generation
 from src.utils import init_providers
 
 
-def test_generation_pipeline_producing_executable_sqls():
+@pytest.mark.asyncio
+async def test_generation_pipeline_producing_executable_sqls():
     llm_provider, _, _, engine = init_providers(EngineConfig())
     generation_pipeline = Generation(
         llm_provider=llm_provider,
@@ -25,8 +27,6 @@ def test_generation_pipeline_producing_executable_sqls():
     ]
 
     for candidate_sql_query in candidate_sql_queries:
-        assert async_validate(
-            lambda: generation_pipeline.run(
-                candidate_sql_query,
-            )
+        assert await generation_pipeline.run(
+            candidate_sql_query,
         )
diff --git a/wren-ai-service/tests/pytest/pipelines/test_document_cleaner.py b/wren-ai-service/tests/pytest/pipelines/test_document_cleaner.py
index e3025f362..ea0b3ec69 100644
--- a/wren-ai-service/tests/pytest/pipelines/test_document_cleaner.py
+++ b/wren-ai-service/tests/pytest/pipelines/test_document_cleaner.py
@@ -1,3 +1,4 @@
+import pytest
 from haystack import Document
 from haystack.document_stores.types import DocumentStore
 
@@ -6,7 +7,8 @@
 from src.utils import init_providers
 
 
-def _mock_store(name: str = "default") -> DocumentStore:
+@pytest.mark.asyncio
+async def _mock_store(name: str = "default") -> DocumentStore:
     _, _, document_store_provider, _ = init_providers(EngineConfig())
     store = document_store_provider.get_store(
         embedding_model_dim=5,
@@ -14,37 +16,40 @@ def _mock_store(name: str = "default") -> DocumentStore:
         recreate_index=True,
     )
 
-    store.write_documents(
+    await store.write_documents(
         [
             Document(id=str(0), content="This is first", embedding=[0.0] * 5),
             Document(id=str(1), content="This is second", embedding=[0.1] * 5),
         ]
     )
-    assert store.count_documents() == 2
+    assert (await store.count_documents()) == 2
     return store
 
 
-def test_clear_document():
-    store = _mock_store()
+@pytest.mark.asyncio
+async def test_clear_document():
+    store = await _mock_store()
 
     cleaner = DocumentCleaner([store])
-    cleaner.run(mdl="{}")
-    assert store.count_documents() == 0
+    await cleaner.run(mdl="{}")
+    assert await store.count_documents() == 0
 
 
-def test_clear_multi_stores():
-    foo_store = _mock_store("foo")
-    bar_store = _mock_store("bar")
+@pytest.mark.asyncio
+async def test_clear_multi_stores():
+    foo_store = await _mock_store("foo")
+    bar_store = await _mock_store("bar")
 
     cleaner = DocumentCleaner([foo_store, bar_store])
-    cleaner.run(mdl="{}")
-    assert foo_store.count_documents() == 0
-    assert bar_store.count_documents() == 0
+    await cleaner.run(mdl="{}")
+    assert await foo_store.count_documents() == 0
+    assert await bar_store.count_documents() == 0
 
 
-def test_component_output():
-    store = _mock_store()
+@pytest.mark.asyncio
+async def test_component_output():
+    store = await _mock_store()
 
     cleaner = DocumentCleaner([store])
-    res = cleaner.run(mdl="{}")
+    res = await cleaner.run(mdl="{}")
 
     assert res == {"mdl": "{}"}
diff --git a/wren-ai-service/tests/pytest/services/test_ask.py b/wren-ai-service/tests/pytest/services/test_ask.py
index 2cac5d701..6212180a5 100644
--- a/wren-ai-service/tests/pytest/services/test_ask.py
+++ b/wren-ai-service/tests/pytest/services/test_ask.py
@@ -5,7 +5,6 @@
 import pytest
 
 from src.core.engine import EngineConfig
-from src.core.pipeline import async_validate
 from src.pipelines.ask import (
     generation,
     historical_question,
@@ -60,14 +59,13 @@ def mdl_str():
         return orjson.dumps(json.load(f)).decode("utf-8")
 
 
-def test_ask_with_successful_query(ask_service: AskService, mdl_str: str):
+@pytest.mark.asyncio
+async def test_ask_with_successful_query(ask_service: AskService, mdl_str: str):
     id = str(uuid.uuid4())
-    async_validate(
-        lambda: ask_service.prepare_semantics(
-            SemanticsPreparationRequest(
-                mdl=mdl_str,
-                id=id,
-            )
+    await ask_service.prepare_semantics(
+        SemanticsPreparationRequest(
+            mdl=mdl_str,
+            id=id,
         )
     )
 
@@ -78,7 +76,7 @@ def test_ask_with_successful_query(ask_service: AskService, mdl_str: str):
         id=id,
     )
     ask_request.query_id = query_id
-    async_validate(lambda: ask_service.ask(ask_request))
+    await ask_service.ask(ask_request)
 
     # getting ask result
     ask_result_response = ask_service.get_ask_result(
diff --git a/wren-ai-service/tests/pytest/services/test_ask_details.py b/wren-ai-service/tests/pytest/services/test_ask_details.py
index e5d567410..f5d21c61a 100644
--- a/wren-ai-service/tests/pytest/services/test_ask_details.py
+++ b/wren-ai-service/tests/pytest/services/test_ask_details.py
@@ -3,7 +3,6 @@
 import pytest
 
 from src.core.engine import EngineConfig
-from src.core.pipeline import async_validate
 from src.pipelines.ask_details import generation
 from src.utils import init_providers
 from src.web.v1.services.ask_details import (
@@ -27,7 +26,8 @@ def ask_details_service():
 
 
 # TODO: we may need to add one more test for the case that steps must be more than 1
-def test_ask_details_with_successful_sql(ask_details_service: AskDetailsService):
+@pytest.mark.asyncio
+async def test_ask_details_with_successful_sql(ask_details_service: AskDetailsService):
     # asking details
     query_id = str(uuid.uuid4())
     sql = "SELECT * FROM book"
@@ -37,7 +37,7 @@ def test_ask_details_with_successful_sql(ask_details_service: AskDetailsService)
         summary="This is a summary",
     )
     ask_details_request.query_id = query_id
-    async_validate(lambda: ask_details_service.ask_details(ask_details_request))
+    await ask_details_service.ask_details(ask_details_request)
 
     # getting ask details result
     ask_details_result_response = ask_details_service.get_ask_details_result(
@@ -68,7 +68,8 @@ def test_ask_details_with_successful_sql(ask_details_service: AskDetailsService)
     assert ask_details_result_response.response.steps[-1].cte_name == ""
 
 
-def test_ask_details_with_failed_sql(ask_details_service: AskDetailsService):
+@pytest.mark.asyncio
+async def test_ask_details_with_failed_sql(ask_details_service: AskDetailsService):
     # asking details
     query_id = str(uuid.uuid4())
     sql = 'SELECT * FROM "xxx"'
@@ -79,7 +80,7 @@ def test_ask_details_with_failed_sql(ask_details_service: AskDetailsService):
         summary=summary,
    )
     ask_details_request.query_id = query_id
-    async_validate(lambda: ask_details_service.ask_details(ask_details_request))
+    await ask_details_service.ask_details(ask_details_request)
 
     # getting ask details result
ask_details_result_response = ask_details_service.get_ask_details_result(
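
Note on the testing pattern this patch adopts: the converted tests rely on pytest-asyncio's asyncio marker and await the async components directly, instead of wrapping calls in the old async_validate helper; the DocumentCleaner likewise fans out its per-store work with asyncio.gather. A minimal, self-contained sketch of that pattern, runnable with pytest and pytest-asyncio installed (InMemoryAsyncStore and clear_stores are illustrative stand-ins written for this note, not names from the patch):

    import asyncio

    import pytest


    class InMemoryAsyncStore:
        """Stand-in exposing the async surface the patch adds to the Qdrant store."""

        def __init__(self) -> None:
            self._docs: dict[str, str] = {}

        async def write_documents(self, documents: dict[str, str]) -> int:
            # Awaitable write that reports how many documents were stored,
            # in the spirit of AsyncDocumentWriter in this patch.
            self._docs.update(documents)
            return len(documents)

        async def count_documents(self) -> int:
            return len(self._docs)

        async def delete_documents(self, ids: list[str]) -> None:
            for _id in ids:
                self._docs.pop(_id, None)


    async def clear_stores(stores: list[InMemoryAsyncStore]) -> None:
        # Same shape as DocumentCleaner.run above: count, delete, and run all stores concurrently.
        async def _clear(store: InMemoryAsyncStore) -> None:
            count = await store.count_documents()
            await store.delete_documents([str(i) for i in range(count)])

        await asyncio.gather(*[_clear(store) for store in stores])


    @pytest.mark.asyncio
    async def test_clear_stores():
        store = InMemoryAsyncStore()
        await store.write_documents({"0": "first", "1": "second"})
        assert await store.count_documents() == 2

        await clear_stores([store])
        assert await store.count_documents() == 0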