{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"LLaVA-NeXT","owner":"LLaVA-VL","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":8,"issueCount":157,"starsCount":2370,"forksCount":163,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-12T02:45:12.547Z"}},{"type":"Public","name":"blog","owner":"LLaVA-VL","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":3,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-17T03:00:37.431Z"}},{"type":"Public","name":"LLaVA-Interactive-Demo","owner":"LLaVA-VL","isFork":false,"description":"LLaVA-Interactive-Demo","allTopics":["multimodal","lmm"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":344,"forksCount":25,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-25T18:03:44.161Z"}},{"type":"Public","name":"llava-interactive","owner":"LLaVA-VL","isFork":false,"description":"LLaVA-Interactive: Chat, Segment and Generate/Edit an image -- All in one demo",
"allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":6,"forksCount":4,"license":null,"participation":[0,0,0,5,1,0,3,3,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-25T17:53:36.402Z"}},{"type":"Public","name":"llava-vl.github.io","owner":"LLaVA-VL","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":9,"forksCount":8,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-09T01:57:50.323Z"}},{"type":"Public","name":"LLaVA-Plus-Codebase","owner":"LLaVA-VL","isFork":false,"description":"LLaVA-Plus: Large Language and Vision Assistants that Plug and Learn to Use Skills","allTopics":["agent","tool-use","large-language-models","multimodal-large-language-models","large-multimodal-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":19,"starsCount":691,"forksCount":52,"license":"Apache License 2.0","participation":[0,4,0,31,42,12,18,46,14,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-01T12:51:10.973Z"}},{"type":"Public","name":"llava-grounding","owner":"LLaVA-VL","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-08T16:13:29.536Z"}},{"type":"Public","name":"LLaVA","owner":"LLaVA-VL","isFork":true,"description":"[NeurIPS'23 Oral] Visual Instruction Tuning: LLaVA (Large Language-and-Vision Assistant) built towards GPT-4V level capabilities.",
"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":5,"forksCount":2110,"license":"Apache License 2.0","participation":[0,4,0,31,42,12,18,46,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-11T10:57:41.258Z"}},{"type":"Public","name":"LLaVA-Med-preview","owner":"LLaVA-VL","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":3,"starsCount":27,"forksCount":5,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-10T16:33:49.139Z"}},{"type":"Public","name":"llava-plus","owner":"LLaVA-VL","isFork":false,"description":"Learning to Use Tools For Creating Multimodal Agents -- LLaVA-Plus (Large Language and Vision Assistants that Plug and Learn to Use Skills) ","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":4,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-10T02:33:12.741Z"}}],"repositoryCount":10,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"LLaVA-VL repositories"}