
[router] add get server info and get model info in grpc server #11303

Merged

slin1237 merged 5 commits into main from grpc-get on Oct 7, 2025
Conversation

@slin1237 (Collaborator) commented Oct 7, 2025

Changes

  1. Add new gRPC methods, GetServerInfo and GetModelInfo (a minimal servicer sketch follows the note below).
  2. Update the protobuf definitions to support these new endpoints.

Note

Internal state is purposely omitted from GetServerInfo; it will be streamed back with each token generation in the future.
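
For readers skimming the diff, here is a rough sketch of what the new servicer methods could look like on the Python side. It is illustrative only: the generated stub modules (sglang_scheduler_pb2 / sglang_scheduler_pb2_grpc), the response field names, and the way server_args is serialized are assumptions inferred from the grpcurl output below, not the actual implementation.

# Illustrative sketch only; stub module, message, and field names are
# assumptions inferred from the grpcurl output in the Validations section.
import json
import time

from sglang.srt.grpc import sglang_scheduler_pb2 as pb2
from sglang.srt.grpc import sglang_scheduler_pb2_grpc as pb2_grpc


class SglangSchedulerServicer(pb2_grpc.SglangSchedulerServicer):
    def __init__(self, server_args, scheduler_info, start_time):
        self.server_args = server_args        # parsed server arguments
        self.scheduler_info = scheduler_info  # dict from scheduler init
        self.start_time = start_time          # time.time() at startup

    def GetModelInfo(self, request, context):
        # Static model metadata gathered at startup.
        return pb2.GetModelInfoResponse(
            model_path=self.server_args.model_path,
            tokenizer_path=self.server_args.tokenizer_path,
            is_generation=True,
            served_model_name=self.server_args.served_model_name,
            max_req_input_len=self.scheduler_info["max_req_input_len"],
        )

    def GetServerInfo(self, request, context):
        # Internal request state is intentionally omitted (see note above).
        # The real proto may model server_args as a structured message (the
        # grpcurl output shows a nested object); a JSON string keeps this
        # sketch short.
        return pb2.GetServerInfoResponse(
            server_args=json.dumps(vars(self.server_args), default=str),
            scheduler_info=json.dumps(self.scheduler_info),
            uptime_seconds=time.time() - self.start_time,
            server_type="grpc",
        )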

Validations

grpcurl -plaintext localhost:30000 sglang.grpc.scheduler.SglangScheduler/GetModelInfo
{
  "model_path": "/raid/models/meta-llama/Llama-3.1-8B-Instruct",
  "tokenizer_path": "/raid/models/meta-llama/Llama-3.1-8B-Instruct",
  "is_generation": true,
  "weight_version": "default",
  "served_model_name": "/raid/models/meta-llama/Llama-3.1-8B-Instruct",
  "max_context_length": 414382,
  "vocab_size": 128256,
  "model_type": "transformer",
  "bos_token_id": 1,
  "max_req_input_len": 131066
}
grpcurl -plaintext localhost:30000 sglang.grpc.scheduler.SglangScheduler/GetServerInfo
{
  "server_args": {
    "allow_auto_truncate": false,
    "api_key": null,
    "attention_backend": null,
    "base_gpu_id": 0,
    "bucket_e2e_request_latency": null,
    "bucket_inter_token_latency": null,
    "bucket_time_to_first_token": null,
    "chat_template": null,
    "chunked_prefill_size": 8192,
    "collect_tokens_histogram": false,
    "completion_template": null,
    "constrained_json_whitespace_pattern": null,
    "context_length": null,
    "cpu_offload_gb": 0,
    "crash_dump_folder": null,
    "cuda_graph_bs": [
      1,
      2,
      4,
      8,
      12,
      16,
      24,
      32,
      40,
      48,
      56,
      64,
      72,
      80,
      88,
      96,
      104,
      112,
      120,
      128,
      136,
      144,
      152,
      160,
      168,
      176,
      184,
      192,
      200,
      208,
      216,
      224,
      232,
      240,
      248,
      256
    ],
    "cuda_graph_max_bs": 256,
    "custom_weight_loader": [],
    "debug_tensor_dump_inject": false,
    "debug_tensor_dump_input_file": null,
    "debug_tensor_dump_output_folder": null,
    "debug_tensor_dump_prefill_only": false,
    "decode_attention_backend": null,
    "decode_log_interval": 40,
    "deepep_config": null,
    "deepep_mode": "auto",
    "delete_ckpt_after_loading": false,
    "device": "cuda",
    "disable_chunked_prefix_cache": false,
    "disable_cuda_graph": false,
    "disable_cuda_graph_padding": false,
    "disable_custom_all_reduce": false,
    "disable_fast_image_processor": false,
    "disable_flashinfer_cutlass_moe_fp4_allgather": false,
    "disable_hybrid_swa_memory": false,
    "disable_outlines_disk_cache": false,
    "disable_overlap_schedule": false,
    "disable_radix_cache": false,
    "disable_shared_experts_fusion": false,
    "disaggregation_bootstrap_port": 8998,
    "disaggregation_decode_dp": null,
    "disaggregation_decode_enable_offload_kvcache": false,
    "disaggregation_decode_polling_interval": 1,
    "disaggregation_decode_tp": null,
    "disaggregation_ib_device": null,
    "disaggregation_mode": "null",
    "disaggregation_prefill_pp": 1,
    "disaggregation_transfer_backend": "mooncake",
    "dist_init_addr": null,
    "dist_timeout": null,
    "download_dir": null,
    "dp_size": 1,
    "ds_channel_config_path": null,
    "ds_heavy_channel_num": 32,
    "ds_heavy_channel_type": "qk",
    "ds_heavy_token_num": 256,
    "ds_sparse_decode_threshold": 4096,
    "dtype": "auto",
    "dynamic_batch_tokenizer_batch_size": 32,
    "dynamic_batch_tokenizer_batch_timeout": 0.002,
    "enable_cache_report": false,
    "enable_cudagraph_gc": false,
    "enable_custom_logit_processor": false,
    "enable_deterministic_inference": false,
    "enable_double_sparsity": false,
    "enable_dp_attention": false,
    "enable_dp_lm_head": false,
    "enable_dynamic_batch_tokenizer": false,
    "enable_eplb": false,
    "enable_expert_distribution_metrics": false,
    "enable_flashinfer_allreduce_fusion": false,
    "enable_fp32_lm_head": false,
    "enable_hierarchical_cache": false,
    "enable_lmcache": false,
    "enable_lora": null,
    "enable_memory_saver": false,
    "enable_metrics": false,
    "enable_metrics_for_all_schedulers": false,
    "enable_mixed_chunk": false,
    "enable_mscclpp": false,
    "enable_multimodal": null,
    "enable_nan_detection": false,
    "enable_nccl_nvls": false,
    "enable_p2p_check": false,
    "enable_pdmux": false,
    "enable_priority_scheduling": false,
    "enable_profile_cuda_graph": false,
    "enable_request_time_stats_logging": false,
    "enable_return_hidden_states": false,
    "enable_single_batch_overlap": false,
    "enable_symm_mem": false,
    "enable_tokenizer_batch_encode": false,
    "enable_torch_compile": false,
    "enable_torch_symm_mem": false,
    "enable_trace": false,
    "enable_two_batch_overlap": false,
    "enable_weights_cpu_backup": false,
    "ep_dispatch_algorithm": "static",
    "ep_num_redundant_experts": 0,
    "ep_size": 1,
    "eplb_algorithm": "auto",
    "eplb_min_rebalancing_utilization_threshold": 1,
    "eplb_rebalance_layers_per_chunk": null,
    "eplb_rebalance_num_iterations": 1000,
    "expert_distribution_recorder_buffer_size": 1000,
    "expert_distribution_recorder_mode": null,
    "file_storage_path": "sglang_storage",
    "flashinfer_mla_disable_ragged": false,
    "flashinfer_mxfp4_moe_precision": "default",
    "gc_warning_threshold_secs": 0,
    "generation_tokens_buckets": null,
    "gpu_id_step": 1,
    "grammar_backend": "xgrammar",
    "hicache_io_backend": "kernel",
    "hicache_mem_layout": "layer_first",
    "hicache_ratio": 2,
    "hicache_size": 0,
    "hicache_storage_backend": null,
    "hicache_storage_backend_extra_config": null,
    "hicache_storage_prefetch_policy": "best_effort",
    "hicache_write_policy": "write_through",
    "host": "127.0.0.1",
    "hybrid_kvcache_ratio": null,
    "init_expert_location": "trivial",
    "is_embedding": false,
    "json_model_override_args": "{}",
    "keep_mm_feature_on_device": false,
    "kv_cache_dtype": "auto",
    "kv_events_config": null,
    "load_balance_method": "round_robin",
    "load_format": "auto",
    "load_watch_interval": 0.1,
    "log_level": "info",
    "log_level_http": null,
    "log_requests": false,
    "log_requests_level": 2,
    "lora_backend": "triton",
    "lora_paths": null,
    "lora_target_modules": null,
    "mamba_ssm_dtype": "float32",
    "max_loaded_loras": null,
    "max_lora_chunk_size": 16,
    "max_lora_rank": null,
    "max_loras_per_batch": 8,
    "max_mamba_cache_size": null,
    "max_prefill_tokens": 16384,
    "max_queued_requests": null,
    "max_running_requests": null,
    "max_total_tokens": null,
    "mem_fraction_static": 0.835,
    "mm_attention_backend": null,
    "model_impl": "auto",
    "model_loader_extra_config": "{}",
    "model_path": "/raid/models/meta-llama/Llama-3.1-8B-Instruct",
    "modelopt_quant": null,
    "moe_a2a_backend": "none",
    "moe_dense_tp_size": null,
    "moe_runner_backend": "auto",
    "nccl_port": null,
    "nnodes": 1,
    "node_rank": 0,
    "nsa_decode": "fa3",
    "nsa_prefill": "flashmla_prefill",
    "num_continuous_decode_steps": 1,
    "num_reserved_decode_tokens": 512,
    "numa_node": null,
    "offload_group_size": -1,
    "offload_mode": "cpu",
    "offload_num_in_group": 1,
    "offload_prefetch_step": 1,
    "oltp_traces_endpoint": "localhost:4317",
    "page_size": 1,
    "port": 30000,
    "pp_max_micro_batch_size": null,
    "pp_size": 1,
    "preferred_sampling_params": null,
    "prefill_attention_backend": null,
    "prefill_round_robin_balance": false,
    "priority_scheduling_preemption_threshold": 10,
    "prompt_tokens_buckets": null,
    "quantization": null,
    "quantization_param_path": null,
    "radix_eviction_policy": "lru",
    "random_seed": 666439528,
    "reasoning_parser": null,
    "remote_instance_weight_loader_seed_instance_ip": null,
    "remote_instance_weight_loader_seed_instance_service_port": null,
    "remote_instance_weight_loader_send_weights_group_ports": null,
    "revision": null,
    "sampling_backend": "flashinfer",
    "schedule_conservativeness": 1,
    "schedule_low_priority_values_first": false,
    "schedule_policy": "fcfs",
    "scheduler_recv_interval": 1,
    "served_model_name": "/raid/models/meta-llama/Llama-3.1-8B-Instruct",
    "show_time_cost": false,
    "skip_server_warmup": false,
    "skip_tokenizer_init": false,
    "sleep_on_idle": false,
    "sm_group_num": 3,
    "speculative_accept_threshold_acc": 1,
    "speculative_accept_threshold_single": 1,
    "speculative_algorithm": null,
    "speculative_attention_mode": "prefill",
    "speculative_draft_model_path": null,
    "speculative_draft_model_revision": null,
    "speculative_eagle_topk": null,
    "speculative_ngram_branch_length": 18,
    "speculative_ngram_capacity": 10000000,
    "speculative_ngram_match_type": "BFS",
    "speculative_ngram_max_bfs_breadth": 10,
    "speculative_ngram_max_match_window_size": 12,
    "speculative_ngram_min_bfs_breadth": 1,
    "speculative_ngram_min_match_window_size": 1,
    "speculative_num_draft_tokens": null,
    "speculative_num_steps": null,
    "speculative_token_map": null,
    "stream_interval": 1,
    "stream_output": false,
    "swa_full_tokens_ratio": 0.8,
    "tbo_token_distribution_threshold": 0.48,
    "tokenizer_metrics_allowed_custom_labels": null,
    "tokenizer_metrics_custom_labels_header": "x-custom-labels",
    "tokenizer_mode": "auto",
    "tokenizer_path": "/raid/models/meta-llama/Llama-3.1-8B-Instruct",
    "tokenizer_worker_num": 1,
    "tool_call_parser": null,
    "tool_server": null,
    "torch_compile_max_bs": 32,
    "torchao_config": "",
    "tp_size": 1,
    "triton_attention_num_kv_splits": 8,
    "triton_attention_reduce_in_fp32": false,
    "triton_attention_split_tile_size": null,
    "trust_remote_code": false,
    "warmups": null,
    "watchdog_timeout": 300,
    "weight_loader_disable_mmap": false,
    "weight_version": "default"
  },
  "scheduler_info": {
    "max_req_input_len": 131066,
    "max_total_num_tokens": 414382,
    "status": "ready"
  },
  "last_receive_timestamp": 1.7598484522728305e+09,
  "uptime_seconds": 601.3068659305573,
  "sglang_version": "0.5.3",
  "server_type": "grpc",
  "start_time": "2025-10-07T14:47:32Z"
}
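
The same calls can also be made from Python instead of grpcurl. A minimal client sketch, assuming generated stubs named sglang_scheduler_pb2 / sglang_scheduler_pb2_grpc (the module names are an assumption; the request and response message names come from the review summary below):

# Minimal client sketch; stub module names are assumptions.
import grpc

from sglang.srt.grpc import sglang_scheduler_pb2 as pb2
from sglang.srt.grpc import sglang_scheduler_pb2_grpc as pb2_grpc

with grpc.insecure_channel("localhost:30000") as channel:
    stub = pb2_grpc.SglangSchedulerStub(channel)
    model_info = stub.GetModelInfo(pb2.GetModelInfoRequest())
    server_info = stub.GetServerInfo(pb2.GetServerInfoRequest())
    print(model_info.model_path, model_info.max_req_input_len)
    print(server_info.server_type, server_info.uptime_seconds)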

@gemini-code-assist (Contributor) commented
Summary of Changes

Hello @slin1237, I'm Gemini Code Assist! I'm currently reviewing this pull request and will post my feedback shortly. In the meantime, here's a summary to help you and other reviewers quickly get up to speed!

This pull request significantly enhances the observability and manageability of the SGLang gRPC scheduler by adding dedicated endpoints to query its operational state. Clients can now programmatically retrieve detailed information about the deployed model and the server's current configuration and runtime metrics, facilitating better monitoring, debugging, and integration with external systems.

Highlights

  • New gRPC Endpoints: Introduced two new gRPC methods: GetModelInfo to retrieve detailed information about the loaded model, and GetServerInfo to obtain comprehensive server configuration and runtime status.
  • Protocol Buffer Updates: Updated the sglang_scheduler.proto definition to include the new GetModelInfoRequest, GetModelInfoResponse, GetServerInfoRequest, and GetServerInfoResponse messages, along with their corresponding RPC service definitions.
  • Server Information Details: The GetServerInfo method now provides server arguments, scheduler initialization information, active request count, pause status, last received timestamp, server uptime, SGLang version, server type, and start time.
  • Model Information Details: The GetModelInfo method exposes critical model parameters such as model path, tokenizer path, generation status, sampling parameters, weight version, served model name, context length, vocabulary size, vision support, model type, and token IDs (EOS, PAD, BOS).

@slin1237 slin1237 changed the title from "Grpc get" to "[router] add get server info and get model info in grpc server" on Oct 7, 2025
@slin1237 slin1237 added the router label Oct 7, 2025
@gemini-code-assist bot (Contributor) left a comment
Code Review

This pull request introduces two new gRPC endpoints, GetModelInfo and GetServerInfo, to expose model and server information. The changes include updating the protobuf definitions and implementing the corresponding servicer methods. The implementation is mostly correct, but I've found an issue in the serialization logic for server arguments which could lead to malformed data for fields containing sets. My feedback includes a suggestion to fix this.
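
For context, a common shape for that kind of fix is a default hook when dumping the server arguments to JSON, so that set-valued fields serialize as lists rather than as their repr strings. A minimal sketch, under the assumption that the server args object is a dataclass (helper names are illustrative, not the PR's code):

# Illustrative sketch; helper names are assumptions, not the PR's code.
import dataclasses
import json


def _json_default(obj):
    # Sets are not JSON-serializable; convert them to lists so the
    # serialized server args stay well-formed.
    if isinstance(obj, (set, frozenset)):
        return list(obj)
    return str(obj)


def serialize_server_args(server_args) -> str:
    # dataclasses.asdict leaves set fields as sets, so the default hook
    # above handles them during json.dumps.
    return json.dumps(dataclasses.asdict(server_args), default=_json_default)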

@slin1237 slin1237 requested a review from ByronHsu as a code owner October 7, 2025 15:29
@slin1237 slin1237 merged commit 2fcd56e into main Oct 7, 2025
20 of 61 checks passed
@slin1237 slin1237 deleted the grpc-get branch October 7, 2025 15:36