Recent advances in vision-language-action (VLA) models have shown promise in integrating image generation with action prediction to improve generalization and reasoning in robot manipulation. However, existing methods are limited to forecasting entire future images, a challenging objective that suffers from redundant information and lacks comprehensive, critical world knowledge such as dynamic, spatial, and semantic information. To address these limitations, we propose DreamVLA, a novel VLA framework that integrates comprehensive world knowledge forecasting to enable inverse dynamics modeling, thereby establishing a perception-prediction-action loop for manipulation tasks. Specifically, DreamVLA introduces dynamic-region-guided world knowledge prediction, integrated with spatial and semantic cues, which provides compact yet comprehensive representations for action planning. This design aligns with how humans interact with the world: forming abstract multimodal reasoning chains before acting. To mitigate interference among the dynamic, spatial, and semantic information during training, we adopt a block-wise structured attention mechanism that masks their mutual attention, preventing information leakage and keeping each representation clean and disentangled. Moreover, to model the conditional distribution over future actions, we employ a diffusion-based transformer that disentangles action representations from shared latent features. Extensive experiments in both real-world and simulated environments demonstrate that DreamVLA achieves a 76.7% success rate on real-robot tasks and an average task-completion length of 4.44 on the CALVIN ABC-D benchmark.
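For intuition, below is a minimal PyTorch sketch of the kind of block-wise structured attention mask the abstract describes, where the dynamic, spatial, and semantic query tokens attend only within their own block. The function name, the per-block token counts, and the assumption that the three query blocks are laid out contiguously (with context tokens handled separately) are illustrative, not taken from the DreamVLA code.

```python
import torch

def block_attention_mask(n_dyn: int, n_spa: int, n_sem: int) -> torch.Tensor:
    """Boolean mask over concatenated [dynamic | spatial | semantic] query tokens.

    True marks an allowed attention edge; each block attends only to itself,
    so the three world-knowledge representations stay disentangled.
    """
    sizes = [n_dyn, n_spa, n_sem]
    total = sum(sizes)
    allowed = torch.zeros(total, total, dtype=torch.bool)
    start = 0
    for size in sizes:
        allowed[start:start + size, start:start + size] = True  # intra-block only
        start += size
    return allowed

# Usage: torch.nn.MultiheadAttention expects a bool attn_mask where True means
# "do NOT attend", so pass the inverted mask.
allowed = block_attention_mask(4, 4, 4)  # hypothetical: 4 query tokens per block
attn_mask = ~allowed                     # 12x12 mask blocking cross-block attention
```

Blocking cross-block attention is what prevents the leakage the abstract mentions: gradients for one prediction target cannot route through another block's queries, while each block can still read the shared context in the full model.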
@article{dreamvla25,
  author     = {Wenyao Zhang and Hongsi Liu and Zekun Qi and Yunan Wang and
                Xinqiang Yu and Jiazhao Zhang and Runpei Dong and Jiawei He and
                He Wang and Zhizheng Zhang and Li Yi and Wenjun Zeng and Xin Jin},
  title      = {DreamVLA: A Vision-Language-Action Model Dreamed with Comprehensive World Knowledge},
  journal    = {CoRR},
  volume     = {abs/2507.04447},
  year       = {2025},
  url        = {https://doi.org/10.48550/arXiv.2507.04447},
  doi        = {10.48550/arXiv.2507.04447},
  eprinttype = {arXiv},
  eprint     = {2507.04447}
}