{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"Open-Sora-Plan","owner":"PKU-YuanGroup","isFork":false,"description":"This project aim to reproduce Sora (Open AI T2V model), we wish the open source community contribute to this project.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":8,"issueCount":177,"starsCount":11089,"forksCount":987,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-05T07:23:05.441Z"}},{"type":"Public","name":"ChronoMagic-Bench","owner":"PKU-YuanGroup","isFork":false,"description":"ChronoMagic-Bench: A Benchmark for Metamorphic Evaluation of Text-to-Time-lapse Video Generation","allTopics":["benchmark","evaluation","time-lapse","video-generation","diffusion-models","text-to-video","time-lapse-dataset","open-sora-plan","metamorphic-video-generation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":162,"forksCount":13,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,24,0,0,0,6,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-31T06:12:59.747Z"}},{"type":"Public","name":"MagicTime","owner":"PKU-YuanGroup","isFork":false,"description":"MagicTime: Time-lapse Video Generation Models as Metamorphic Simulators","allTopics":["time-lapse","video-generation","diffusion-models","text-to-video","long-video-generation","time-lapse-dataset","open-sora-plan","metamorphic-video-generation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":6,"starsCount":1241,"forksCount":119,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-29T10:05:55.819Z"}},{"type":"Public","name":"Video-LLaVA","owner":"PKU-YuanGroup","isFork":false,"description":"Video-LLaVA: Learning United Visual Representation by Alignment Before Projection","allTopics":["multi-modal","instruction-tuning","large-vision-language-model"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":6,"issueCount":90,"starsCount":2769,"forksCount":197,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,2,10,0,62,24,6,1,0,0,1,1,15,13,7,0,0,3,1,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-27T08:19:37.006Z"}},{"type":"Public","name":"Chat-UniVi","owner":"PKU-YuanGroup","isFork":false,"description":"[CVPR 2024 Highlight🔥] Chat-UniVi: Unified Visual Representation Empowers Large Language Models with Image and Video Understanding","allTopics":["video-understanding","image-understanding","large-language-models","vision-language-model"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":11,"starsCount":737,"forksCount":39,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,1,24,29,8,0,0,0,1,5,0,0,0,0,0,0,1,0,1,0,0,0,3,1,0,0,0,0,0,2,2,0,0,0,0,2,0,1,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-21T15:33:07.066Z"}},{"type":"Public","name":"ProLLaMA","owner":"PKU-YuanGroup","isFork":false,"description":"A Protein Large Language Model for Multi-Task Protein Language Processing","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":116,"forksCount":13,"license":"Apache License 
2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,45,8,1,0,0,0,2,6,0,8,0,0,0,0,0,1,0,1,5,0,1,1,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-17T07:31:49.215Z"}},{"type":"Public","name":"Open-Sora-Dataset","owner":"PKU-YuanGroup","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":4,"starsCount":91,"forksCount":5,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,7,3,4,12,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-28T08:18:51.095Z"}},{"type":"Public","name":"LLMBind","owner":"PKU-YuanGroup","isFork":false,"description":"LLMBind: A Unified Modality-Task Integration Framework","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":14,"forksCount":2,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,9,3,0,0,0,0,0,0,0,0,0,0,0,0,0,31,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-16T16:39:55.674Z"}},{"type":"Public","name":"Hallucination-Attack","owner":"PKU-YuanGroup","isFork":false,"description":"Attack to induce LLMs within hallucinations","allTopics":["nlp","ai-safety","hallucinations","llm","llm-safety","machine-learning","deep-learning","adversarial-attacks"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":93,"forksCount":11,"license":"MIT License","participation":[0,0,0,0,0,0,0,6,3,4,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-17T08:48:15.526Z"}},{"type":"Public","name":"Envision3D","owner":"PKU-YuanGroup","isFork":false,"description":"Envision3D: One Image to 3D with Anchor Views Interpolation","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":100,"forksCount":8,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-16T18:46:32.791Z"}},{"type":"Public","name":"MoE-LLaVA","owner":"PKU-YuanGroup","isFork":false,"description":"Mixture-of-Experts for Large Vision-Language Models","allTopics":["moe","multi-modal","mixture-of-experts","large-vision-language-model"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":53,"starsCount":1866,"forksCount":116,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,10,7,0,51,64,39,22,28,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-15T14:50:56.572Z"}},{"type":"Public","name":"LanguageBind","owner":"PKU-YuanGroup","isFork":false,"description":"【ICLR 2024🔥】 Extending Video-Language Pretraining to N-modality by Language-based Semantic Alignment","allTopics":["multi-modal","zero-shot","pretraining","language-central"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":20,"starsCount":649,"forksCount":48,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-25T12:28:38.429Z"}},{"type":"Public","name":"TaxDiff","owner":"PKU-YuanGroup","isFork":false,"description":"The official code for \"TaxDiff: Taxonomic-Guided Diffusion Model for Protein Sequence 
Generation\"","allTopics":["protein-sequences","generate-model","meachine-learning","ai4science"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":45,"forksCount":6,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,44,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-04T01:21:18.395Z"}},{"type":"Public","name":"Peer-review-in-LLMs","owner":"PKU-YuanGroup","isFork":false,"description":"Peer-review-in-LLMs: Automatic Evaluation Method for LLMs in Open-environment,https://arxiv.org/pdf/2402.01830.pdf","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":25,"forksCount":2,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-07T02:30:30.281Z"}},{"type":"Public","name":"Machine-Mindset","owner":"PKU-YuanGroup","isFork":false,"description":"An MBTI Exploration of Large Language Models","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":438,"forksCount":20,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-02T02:53:05.571Z"}},{"type":"Public","name":"Video-Bench","owner":"PKU-YuanGroup","isFork":false,"description":"A Comprehensive Benchmark and Toolkit for Evaluating Video-based Large Language Models!","allTopics":["benchmark","toolkit","large-language-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":110,"forksCount":2,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,69,16,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-31T03:41:41.734Z"}}],"repositoryCount":16,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"PKU-YuanGroup repositories"}