{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"ragas","owner":"explodinggradients","isFork":false,"description":"Evaluation framework for your Retrieval Augmented Generation (RAG) pipelines","allTopics":["llm","llmops"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":40,"issueCount":181,"starsCount":6531,"forksCount":638,"license":"Apache License 2.0","participation":[10,6,0,10,10,6,12,8,9,12,9,2,7,9,5,7,16,5,11,17,22,7,13,12,16,8,10,2,8,6,2,2,5,4,5,9,2,1,3,1,0,10,4,2,5,7,17,10,4,6,4,11],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-12T03:58:05.866Z"}},{"type":"Public","name":"notes","owner":"explodinggradients","isFork":false,"description":"Research notes and extra resources for all the work at explodinggradients.com","allTopics":["research","notes","opensourceai","llms"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":18,"forksCount":5,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-23T21:22:22.358Z"}},{"type":"Public","name":"LLaMA-Factory","owner":"explodinggradients","isFork":true,"description":"Unify Efficient Fine-tuning of 100+ LLMs","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":3775,"license":"Apache License 2.0","participation":[12,26,2,13,25,16,16,29,37,39,8,42,40,41,14,11,19,23,57,5,32,15,19,25,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-03T22:07:29.907Z"}},{"type":"Public","name":".github","owner":"explodinggradients","isFork":false,"description":"","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":1,"license":null,"participation":[0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-24T19:21:40.927Z"}},{"type":"Public","name":"Funtuner","owner":"explodinggradients","isFork":false,"description":"Supervised instruction finetuning for LLM with HF trainer and Deepspeed ","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":32,"forksCount":4,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-06T13:18:16.591Z"}},{"type":"Public","name":"nemesis","owner":"explodinggradients","isFork":false,"description":"Reward Model framework for LLM RLHF","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":4,"starsCount":56,"forksCount":3,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-07T14:59:52.505Z"}}],"repositoryCount":6,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"explodinggradients repositories"}