{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"Co-LLM-Agents","owner":"UMass-Foundation-Model","isFork":false,"description":"[ICLR 2024] Source codes for the paper \"Building Cooperative Embodied Agents Modularly with Large Language Models\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":6,"starsCount":208,"forksCount":29,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-26T15:30:49.058Z"}},{"type":"Public","name":"FlexAttention","owner":"UMass-Foundation-Model","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":19,"forksCount":4,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-30T03:59:52.936Z"}},{"type":"Public","name":"VisualCoT","owner":"UMass-Foundation-Model","isFork":false,"description":"Codebase for AAAI 2024 conference paper Visual Chain-of-Thought Prompting for Knowledge-based Visual Reasoning","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":12,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-21T04:11:36.168Z"}},{"type":"Public","name":"3D-VLA","owner":"UMass-Foundation-Model","isFork":false,"description":"[ICML 2024] 3D-VLA: A 3D Vision-Language-Action Generative World Model","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":6,"starsCount":310,"forksCount":11,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-09T05:21:36.192Z"}},{"type":"Public","name":"3D-LLM","owner":"UMass-Foundation-Model","isFork":false,"description":"Code for 3D-LLM: Injecting the 3D World into Large Language Models","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":27,"starsCount":895,"forksCount":55,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-06T19:14:00.267Z"}},{"type":"Public","name":"RapVerse","owner":"UMass-Foundation-Model","isFork":false,"description":"Code for paper \"RapVerse: Coherent Vocals and Whole-Body Motions Generations from Text\"","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":1,"starsCount":13,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-30T13:03:04.299Z"}},{"type":"Public","name":"HAZARD","owner":"UMass-Foundation-Model","isFork":false,"description":"HAZARD challenge","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":25,"forksCount":2,"license":"BSD 2-Clause \"Simplified\" License","participation":[0,3,2,0,0,0,0,0,0,4,0,0,0,0,0,2,0,0,4,0,0,0,2,0,1,0,0,0,0,0,0,2,1,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-05T12:05:19.989Z"}},{"type":"Public","name":"COMBO","owner":"UMass-Foundation-Model","isFork":false,"description":"Source codes for the paper \"COMBO: Compositional World Models for Embodied Multi-Agent Cooperation\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":25,"forksCount":3,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-17T01:58:24.847Z"}},{"type":"Public","name":"MultiPLY","owner":"UMass-Foundation-Model","isFork":false,"description":"Code for MultiPLY: A Multisensory Object-Centric Embodied Large Language Model in 3D World","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":115,"forksCount":6,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-17T18:29:54.981Z"}},{"type":"Public","name":"genome","owner":"UMass-Foundation-Model","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":15,"forksCount":1,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-10T01:25:27.701Z"}},{"type":"Public","name":"CoVLM","owner":"UMass-Foundation-Model","isFork":false,"description":"Official implementation for CoVLM: Composing Visual Entities and Relationships in Large Language Models Via Communicative Decoding","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":40,"forksCount":4,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-07T17:35:47.889Z"}},{"type":"Public","name":"Mod-Squad","owner":"UMass-Foundation-Model","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":70,"forksCount":5,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-03T08:16:35.637Z"}}],"repositoryCount":12,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"UMass-Foundation-Model repositories"}