@@ -578,30 +578,33 @@ def test_logs_readonly_insights_query(start_aws_proxy, cleanups):
         ],
     )
 
-    # start_query and get_query_results through local client (proxied)
-    # should work in read-only mode - use retry to wait for query completion
-    def _run_insights_query():
-        start_time = int((time.time() - 300) * 1000)  # 5 minutes ago
-        end_time = int((time.time() + 60) * 1000)  # 1 minute from now
+    # start_query through local client (proxied) - should work in read-only mode
+    start_time = int((time.time() - 300) * 1000)  # 5 minutes ago
+    end_time = int((time.time() + 60) * 1000)  # 1 minute from now
 
+    def _start_query():
         query_response = logs_client.start_query(
             logGroupName=log_group_name,
             startTime=start_time,
             endTime=end_time,
             queryString="fields @timestamp, @message | limit 10",
         )
-        query_id = query_response["queryId"]
-        assert query_id is not None
+        return query_response["queryId"]
 
-        # get_query_results - poll until complete
+    # Retry start_query in case log events aren't indexed yet
+    query_id = retry(_start_query, retries=10, sleep=3)
+    assert query_id is not None
+
+    # get_query_results through local client (proxied) - poll until complete
+    def _get_query_results():
         results = logs_client.get_query_results(queryId=query_id)
         if results["status"] not in ["Complete", "Failed", "Cancelled"]:
             raise AssertionError(f"Query not complete yet: {results['status']}")
         if results["status"] != "Complete" or len(results["results"]) < 1:
             raise AssertionError("Query completed but no results found yet")
         return results
 
-    results = retry(_run_insights_query, retries=30, sleep=2)
+    results = retry(_get_query_results, retries=30, sleep=2)
     assert results["status"] == "Complete"
     assert len(results["results"]) >= 1
 
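For reference, the retry helper used in this test is assumed to poll a callable until it stops raising, sleeping between attempts and re-raising the last error once the retry budget is spent. The sketch below is a minimal stand-in under that assumption; the project's actual utility may differ in name, signature, and defaults:

    import time

    def retry(function, retries=3, sleep=1.0, **kwargs):
        # Minimal polling helper (assumed behavior, not the project's real implementation):
        # call `function` up to `retries` times, sleeping `sleep` seconds between
        # attempts, and re-raise the last exception if every attempt fails.
        last_error = None
        for attempt in range(retries):
            try:
                return function(**kwargs)
            except Exception as error:
                last_error = error
                if attempt < retries - 1:
                    time.sleep(sleep)
        raise last_error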