Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-02 03:32:50 +08:00)

Compare commits: 1485 commits
@@ -3,9 +3,13 @@
.github
.venv
cache
data
examples
docker
saves
hf_cache
ms_cache
om_cache
shared_data
output
.dockerignore
.gitattributes
.gitignore
Dockerfile
42 .env.local Normal file
@@ -0,0 +1,42 @@
# Note: actually we do not support .env, just for reference
# api
API_HOST=
API_PORT=
API_KEY=
API_MODEL_NAME=
API_VERBOSE=
FASTAPI_ROOT_PATH=
MAX_CONCURRENT=
# general
DISABLE_VERSION_CHECK=
FORCE_CHECK_IMPORTS=
ALLOW_EXTRA_ARGS=
LLAMAFACTORY_VERBOSITY=
USE_MODELSCOPE_HUB=
USE_OPENMIND_HUB=
USE_RAY=
RECORD_VRAM=
OPTIM_TORCH=
NPU_JIT_COMPILE=
# torchrun
FORCE_TORCHRUN=
MASTER_ADDR=
MASTER_PORT=
NNODES=
NODE_RANK=
NPROC_PER_NODE=
# wandb
WANDB_DISABLED=
WANDB_PROJECT=
WANDB_API_KEY=
# gradio ui
GRADIO_SHARE=
GRADIO_SERVER_NAME=
GRADIO_SERVER_PORT=
GRADIO_ROOT_PATH=
GRADIO_IPV6=
# setup
ENABLE_SHORT_CONSOLE=
# reserved (do not use)
LLAMABOARD_ENABLED=
LLAMABOARD_WORKDIR=
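Since the note above says `.env` files are not actually read, these variables only take effect when exported in the shell before launching. A minimal sketch with illustrative values; the config path is an example file from the repository's examples directory:

```bash
# Illustrative values; .env.local itself is never loaded by LLaMA-Factory.
export WANDB_PROJECT=llamafactory
export FORCE_TORCHRUN=1        # force a torchrun launch
export NPROC_PER_NODE=4        # processes (GPUs) per node
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
```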
46 .github/CONTRIBUTING.md vendored
@@ -19,3 +19,49 @@ There are several ways you can contribute to LLaMA Factory:

### Style guide

LLaMA Factory follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html), check it for details.

### Create a Pull Request

1. Fork the [repository](https://github.com/hiyouga/LLaMA-Factory) by clicking on the [Fork](https://github.com/hiyouga/LLaMA-Factory/fork) button on the repository's page. This creates a copy of the code under your GitHub user account.

2. Clone your fork to your local disk, and add the base repository as a remote:

   ```bash
   git clone git@github.com:[username]/LLaMA-Factory.git
   cd LLaMA-Factory
   git remote add upstream https://github.com/hiyouga/LLaMA-Factory.git
   ```

3. Create a new branch to hold your development changes:

   ```bash
   git checkout -b dev_your_branch
   ```

4. Set up a development environment by running the following command in a virtual environment:

   ```bash
   pip install -e ".[dev]"
   ```

   If LLaMA Factory was already installed in the virtual environment, remove it with `pip uninstall llamafactory` before reinstalling it in editable mode with the `-e` flag.

5. Check your code before committing:

   ```bash
   make commit
   make style && make quality
   make test
   ```

6. Submit changes:

   ```bash
   git add .
   git commit -m "commit message"
   git fetch upstream
   git rebase upstream/main
   git push -u origin dev_your_branch
   ```

7. Create a pull request from your branch `dev_your_branch` at the [origin repo](https://github.com/hiyouga/LLaMA-Factory).
61 .github/ISSUE_TEMPLATE/1-bug-report.yml vendored Normal file
@@ -0,0 +1,61 @@
name: "\U0001F41B Bug / help"
description: Create a report to help us improve the LLaMA Factory
labels: ["bug", "pending"]
body:
  - type: markdown
    attributes:
      value: |
        Issues included in **[FAQs](https://github.com/hiyouga/LLaMA-Factory/issues/4614)** or those with **insufficient** information may be closed without a response.
        已经包含在 **[常见问题](https://github.com/hiyouga/LLaMA-Factory/issues/4614)** 内或提供信息**不完整**的 issues 可能不会被回复。

  - type: markdown
    attributes:
      value: |
        Please do not create issues that are not related to framework bugs under this category, use **[Discussions](https://github.com/hiyouga/LLaMA-Factory/discussions/categories/q-a)** instead.
        请勿在此分类下创建和框架 bug 无关的 issues,训练问题求助请使用 **[讨论区](https://github.com/hiyouga/LLaMA-Factory/discussions/categories/q-a)**。

  - type: checkboxes
    id: reminder
    attributes:
      label: Reminder
      description: |
        Please ensure you have read the above rules carefully and searched the existing issues (including FAQs).
        请确保您已经认真阅读了上述规则并且搜索过现有的 issues(包括常见问题)。

      options:
        - label: I have read the above rules and searched the existing issues.
          required: true

  - type: textarea
    id: system-info
    validations:
      required: true
    attributes:
      label: System Info
      description: |
        Please share your system info with us. You can run the command **llamafactory-cli env** and copy-paste its output below.
        请提供您的系统信息。您可以在命令行运行 **llamafactory-cli env** 并将其输出复制到该文本框中。

      placeholder: llamafactory version, platform, python version, ...

  - type: textarea
    id: reproduction
    validations:
      required: true
    attributes:
      label: Reproduction
      description: |
        Please provide entry arguments, error messages and stack traces that reproduces the problem.
        请提供入口参数,错误日志以及异常堆栈以便于我们复现问题。

      value: |
        ```text
        Put your message here.
        ```

  - type: textarea
    id: others
    validations:
      required: false
    attributes:
      label: Others
41 .github/ISSUE_TEMPLATE/2-feature-request.yml vendored Normal file
@@ -0,0 +1,41 @@
name: "\U0001F680 Feature request"
description: Submit a request for a new feature
labels: ["enhancement", "pending"]
body:
  - type: markdown
    attributes:
      value: |
        Please do not create issues that are not related to new features under this category.
        请勿在此分类下创建和新特性无关的 issues。

  - type: checkboxes
    id: reminder
    attributes:
      label: Reminder
      description: |
        Please ensure you have read the above rules carefully and searched the existing issues.
        请确保您已经认真阅读了上述规则并且搜索过现有的 issues。

      options:
        - label: I have read the above rules and searched the existing issues.
          required: true

  - type: textarea
    id: description
    validations:
      required: true
    attributes:
      label: Description
      description: |
        A clear and concise description of the feature proposal.
        请详细描述您希望加入的新功能特性。

  - type: textarea
    id: contribution
    validations:
      required: false
    attributes:
      label: Pull Request
      description: |
        Have you already created the relevant PR and submitted the code?
        您是否已经创建了相关 PR 并提交了代码?
58 .github/ISSUE_TEMPLATE/bug-report.yml vendored
@@ -1,58 +0,0 @@
name: "\U0001F41B Bug / Help"
description: Create a report to help us improve the LLaMA Factory
body:
  - type: checkboxes
    id: reminder
    attributes:
      label: Reminder
      description: |
        Please ensure you have read the README carefully and searched the existing issues.
        请确保您已经认真阅读了 README 并且搜索过现有的 Issue。

      options:
        - label: I have read the README and searched the existing issues.
          required: true

  - type: textarea
    id: reproduction
    validations:
      required: true
    attributes:
      label: Reproduction
      description: |
        Please provide code snippets, error messages and stack traces that reproduces the problem.
        请提供运行参数,错误信息以及异常堆栈以便于我们复现该问题。
        Remember to use Markdown tags to correctly format your code.
        请合理使用 Markdown 标签来格式化您的文本。

      placeholder: |
        python src/train_bash.py ...

  - type: textarea
    id: expected-behavior
    validations:
      required: false
    attributes:
      label: Expected behavior
      description: |
        Please provide a clear and concise description of what you would expect to happen.
        请提供您原本的目的,即这段代码的期望行为。

  - type: textarea
    id: system-info
    validations:
      required: false
    attributes:
      label: System Info
      description: |
        Please share your system info with us. You can run the command **transformers-cli env** and copy-paste its output below.
        请提供您的系统信息。您可以在命令行运行 **transformers-cli env** 并将其输出复制到该文本框中。

      placeholder: transformers version, platform, python version, ...

  - type: textarea
    id: others
    validations:
      required: false
    attributes:
      label: Others
8 .github/ISSUE_TEMPLATE/config.yml vendored Normal file
@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: 📚 FAQs | 常见问题
    url: https://github.com/hiyouga/LLaMA-Factory/issues/4614
    about: Reading in advance is recommended | 建议提前阅读
  - name: Discussions | 讨论区
    url: https://github.com/hiyouga/LLaMA-Factory/discussions
    about: Please ask fine-tuning questions here | 请在这里讨论训练问题
1 .github/PULL_REQUEST_TEMPLATE.md vendored
@@ -5,3 +5,4 @@ Fixes # (issue)

## Before submitting

- [ ] Did you read the [contributor guideline](https://github.com/hiyouga/LLaMA-Factory/blob/main/.github/CONTRIBUTING.md)?
- [ ] Did you write any new necessary tests?
108 .github/workflows/docker.yml vendored Normal file
@@ -0,0 +1,108 @@
name: docker

on:
  workflow_dispatch:
  push:
    branches:
      - "main"
    paths:
      - "**/*.py"
      - "requirements.txt"
      - "docker/**"
      - ".github/workflows/*.yml"
  pull_request:
    branches:
      - "main"
    paths:
      - "**/*.py"
      - "requirements.txt"
      - "docker/**"
      - ".github/workflows/*.yml"

jobs:
  build:
    strategy:
      fail-fast: false
      matrix:
        device:
          - "cuda"
          - "npu"

    runs-on: ubuntu-latest

    concurrency:
      group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.device }}
      cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

    environment:
      name: docker
      url: https://hub.docker.com/r/hiyouga/llamafactory

    steps:
      - name: Free up disk space
        uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1
        with:
          tool-cache: true
          docker-images: false

      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.9"

      - name: Get llamafactory version
        id: version
        run: |
          echo "tag=$(python setup.py --version | sed 's/\.dev0//')" >> "$GITHUB_OUTPUT"

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to Docker Hub
        if: ${{ github.event_name != 'pull_request' }}
        uses: docker/login-action@v3
        with:
          username: ${{ vars.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to Quay
        if: ${{ github.event_name != 'pull_request' && matrix.device == 'npu' }}
        uses: docker/login-action@v3
        with:
          registry: quay.io
          username: ${{ vars.QUAY_ASCEND_USERNAME }}
          password: ${{ secrets.QUAY_ASCEND_TOKEN }}

      - name: Build and push Docker image (CUDA)
        if: ${{ matrix.device == 'cuda' }}
        uses: docker/build-push-action@v6
        with:
          context: .
          file: ./docker/docker-cuda/Dockerfile
          build-args: |
            EXTRAS=metrics,deepspeed,liger-kernel
          push: ${{ github.event_name != 'pull_request' }}
          tags: |
            docker.io/hiyouga/llamafactory:latest
            docker.io/hiyouga/llamafactory:${{ steps.version.outputs.tag }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Build and push Docker image (NPU)
        if: ${{ matrix.device == 'npu' }}
        uses: docker/build-push-action@v6
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          file: ./docker/docker-npu/Dockerfile
          push: ${{ github.event_name != 'pull_request' }}
          tags: |
            docker.io/hiyouga/llamafactory:latest-npu-a2
            docker.io/hiyouga/llamafactory:${{ steps.version.outputs.tag }}-npu-a2
            quay.io/ascend/llamafactory:latest-npu-a2
            quay.io/ascend/llamafactory:${{ steps.version.outputs.tag }}-npu-a2
          cache-from: type=gha
          cache-to: type=gha,mode=max
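The images this workflow pushes can be consumed directly; a minimal sketch of running the CUDA image locally, where the GPU flag and port mapping are assumptions about a typical single-machine setup (7860 is Gradio's default port):

```bash
# Pull the CUDA image that the workflow publishes to Docker Hub.
docker pull docker.io/hiyouga/llamafactory:latest

# Launch the Gradio web UI on the default port.
docker run -it --rm --gpus all -p 7860:7860 \
  docker.io/hiyouga/llamafactory:latest llamafactory-cli webui
```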
32 .github/workflows/label_issue.yml vendored Normal file
@@ -0,0 +1,32 @@
name: label_issue

on:
  issues:
    types:
      - opened

jobs:
  label_issue:
    runs-on: ubuntu-latest

    permissions:
      issues: write

    steps:
      - env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          ISSUE_URL: ${{ github.event.issue.html_url }}
          ISSUE_TITLE: ${{ github.event.issue.title }}
        run: |
          LABEL=""
          NPU_KEYWORDS=(npu huawei ascend 华为 昇腾 910)
          ISSUE_TITLE_LOWER=$(echo $ISSUE_TITLE | tr '[:upper:]' '[:lower:]')
          for KEYWORD in ${NPU_KEYWORDS[@]}; do
            if [[ $ISSUE_TITLE_LOWER == *$KEYWORD* ]] && [[ $ISSUE_TITLE_LOWER != *input* ]]; then
              LABEL="npu"
              break
            fi
          done
          if [ -n "$LABEL" ]; then
            gh issue edit $ISSUE_URL --add-label $LABEL
          fi
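Because the labeling step is plain bash, the keyword matching can be sanity-checked outside Actions; a minimal sketch that swaps the `gh` call for an echo, using an illustrative issue title:

```bash
ISSUE_TITLE="[NPU] OOM on Ascend 910B"   # illustrative title, not a real issue
LABEL=""
NPU_KEYWORDS=(npu huawei ascend 华为 昇腾 910)
ISSUE_TITLE_LOWER=$(echo "$ISSUE_TITLE" | tr '[:upper:]' '[:lower:]')
for KEYWORD in ${NPU_KEYWORDS[@]}; do
  # same guard as the workflow: match a keyword, but skip titles about "input"
  if [[ $ISSUE_TITLE_LOWER == *$KEYWORD* ]] && [[ $ISSUE_TITLE_LOWER != *input* ]]; then
    LABEL="npu"
    break
  fi
done
echo "would add label: ${LABEL:-none}"   # stands in for: gh issue edit $ISSUE_URL --add-label $LABEL
```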
36 .github/workflows/publish.yml vendored Normal file
@@ -0,0 +1,36 @@
name: publish

on:
  workflow_dispatch:
  release:
    types:
      - published

jobs:
  publish:
    name: Upload release to PyPI

    runs-on: ubuntu-latest

    environment:
      name: release
      url: https://pypi.org/p/llamafactory

    permissions:
      id-token: write

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.9"

      - name: Build package
        run: |
          make build

      - name: Publish package
        uses: pypa/gh-action-pypi-publish@release/v1
86 .github/workflows/tests.yml vendored
@@ -1,29 +1,99 @@
 name: tests

 on:
   workflow_dispatch:
   push:
-    branches: [ "main" ]
+    branches:
+      - "main"
+    paths:
+      - "**/*.py"
+      - "requirements.txt"
+      - ".github/workflows/*.yml"
   pull_request:
-    branches: [ "main" ]
+    branches:
+      - "main"
+    paths:
+      - "**/*.py"
+      - "requirements.txt"
+      - ".github/workflows/*.yml"

 jobs:
-  check_code_quality:
+  tests:
+    strategy:
+      fail-fast: false
+      matrix:
+        python:
+          - "3.9"
+          - "3.10"
+          - "3.11"
+          - "3.12"
+        os:
+          - "ubuntu-latest"
+          - "windows-latest"
+          - "macos-13"
+        transformers:
+          - null
+        include: # test backward compatibility
+          - python: "3.9"
+            os: "ubuntu-latest"
+            transformers: "4.49.0"
+          - python: "3.9"
+            os: "ubuntu-latest"
+            transformers: "4.51.0"

-    runs-on: ubuntu-latest
+    runs-on: ${{ matrix.os }}
+
+    concurrency:
+      group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.os }}-${{ matrix.python }}-${{ matrix.transformers }}
+      cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+    env:
+      HF_TOKEN: ${{ secrets.HF_TOKEN }}
+      OS_NAME: ${{ matrix.os }}

     steps:
-      - uses: actions/checkout@v4
+      - name: Checkout
+        uses: actions/checkout@v4

       - name: Set up Python
         uses: actions/setup-python@v5
         with:
-          python-version: "3.8"
+          python-version: ${{ matrix.python }}
+          cache: "pip"
+          cache-dependency-path: "**/requirements*.txt"

       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          python -m pip install ruff
+          python -m pip install ".[torch,dev]"
+
+      - name: Install transformers
+        if: ${{ matrix.transformers }}
+        run: |
+          python -m pip install "transformers==${{ matrix.transformers }}"
+
+      - name: Cache files
+        id: hf-hub-cache
+        uses: actions/cache@v4
+        with:
+          path: ${{ runner.temp }}/huggingface
+          key: huggingface-${{ matrix.os }}-${{ matrix.python }}-${{ matrix.transformers }}-${{ hashFiles('tests/version.txt') }}

       - name: Check quality
         run: |
-          make style && make quality
+          make style && make quality
+
+      - name: Check license
+        run: |
+          make license
+
+      - name: Check build
+        run: |
+          make build
+
+      - name: Test with pytest
+        run: |
+          make test
+        env:
+          HF_HOME: ${{ runner.temp }}/huggingface
+          HF_HUB_OFFLINE: "${{ steps.hf-hub-cache.outputs.cache-hit == 'true' && '1' || '0' }}"
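The checks the matrix jobs run can be reproduced locally through the same Makefile targets the workflow calls; a minimal sketch, assuming a fresh virtual environment:

```bash
python -m pip install --upgrade pip
python -m pip install ".[torch,dev]"   # same extras the workflow installs
make style && make quality             # ruff format and lint checks
make license                           # license-header check
make build                             # sdist/wheel build check
make test                              # pytest suite (CPU-only, wandb disabled)
```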
18 .gitignore vendored
@@ -159,7 +159,21 @@ cython_debug/
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 .idea/

+# vscode
+.vscode/
+
+# uv
+uv.lock

 # custom .gitignore
-user.config
-saves/
+hf_cache/
+ms_cache/
+om_cache/
+cache/
+config/
+saves/
+output/
+wandb/
+swanlog/
+generated_predictions.jsonl
+predictions_score.json
28 .pre-commit-config.yaml Normal file
@@ -0,0 +1,28 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: check-ast
      - id: check-added-large-files
        args: ['--maxkb=25000']
      - id: check-merge-conflict
      - id: check-yaml
      - id: debug-statements
      - id: end-of-file-fixer
      - id: trailing-whitespace
        args: [--markdown-linebreak-ext=md]
      - id: no-commit-to-branch
        args: ['--branch', 'main']

  - repo: https://github.com/asottile/pyupgrade
    rev: v3.17.0
    hooks:
      - id: pyupgrade
        args: [--py38-plus]

  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.6.9
    hooks:
      - id: ruff
        args: [--fix]
      - id: ruff-format
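These hooks run on every `git commit` once installed; a minimal sketch of the manual workflow, equivalent to the `make commit` target defined in the Makefile below:

```bash
pip install pre-commit      # the hook runner itself
pre-commit install          # register the git hook for this clone
pre-commit run --all-files  # apply every configured hook to the whole tree
```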
11 CITATION.cff
@@ -12,12 +12,16 @@ authors:
     given-names: "Yanhan"
   - family-names: "Luo"
     given-names: "Zheyan"
+  - family-names: "Feng"
+    given-names: "Zhangchi"
   - family-names: "Ma"
     given-names: "Yongqiang"
 title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models"
 url: "https://arxiv.org/abs/2403.13372"
 preferred-citation:
-  type: article
+  type: conference-paper
+  conference:
+    name: "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)"
   authors:
     - family-names: "Zheng"
       given-names: "Yaowei"
@@ -29,9 +33,12 @@ preferred-citation:
       given-names: "Yanhan"
     - family-names: "Luo"
       given-names: "Zheyan"
+    - family-names: "Feng"
+      given-names: "Zhangchi"
     - family-names: "Ma"
       given-names: "Yongqiang"
-  journal: "arXiv preprint arXiv:2403.13372"
   title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models"
   url: "https://arxiv.org/abs/2403.13372"
   year: 2024
+  publisher: "Association for Computational Linguistics"
+  address: "Bangkok, Thailand"
14 Dockerfile
@@ -1,14 +0,0 @@
FROM nvcr.io/nvidia/pytorch:24.01-py3

WORKDIR /app

COPY requirements.txt /app/
RUN pip install -r requirements.txt

COPY . /app/
RUN pip install -e .[deepspeed,metrics,bitsandbytes,qwen]

VOLUME [ "/root/.cache/huggingface/", "/app/data", "/app/output" ]
EXPOSE 7860

CMD [ "llamafactory-cli", "webui" ]
1 MANIFEST.in Normal file
@@ -0,0 +1 @@
include LICENSE requirements.txt
17 Makefile
@@ -1,6 +1,16 @@
-.PHONY: quality style
+.PHONY: build commit license quality style test

-check_dirs := scripts src tests
+check_dirs := scripts src tests setup.py
+
+build:
+	pip3 install build && python3 -m build
+
+commit:
+	pre-commit install
+	pre-commit run --all-files
+
+license:
+	python3 tests/check_license.py $(check_dirs)

 quality:
 	ruff check $(check_dirs)
@@ -9,3 +19,6 @@ quality:
 style:
 	ruff check $(check_dirs) --fix
 	ruff format $(check_dirs)
+
+test:
+	CUDA_VISIBLE_DEVICES= WANDB_DISABLED=true pytest -vv tests/
757 README_zh.md
File diff suppressed because it is too large
38 assets/alaya_new.svg Normal file
File diff suppressed because one or more lines are too long. After: 47 KiB

1216 assets/benchmark.svg
File diff suppressed because it is too large. Before: 29 KiB

Binary file not shown. Before: 145 KiB, After: 166 KiB

BIN assets/wechat_alaya.png Normal file
Binary file not shown. After: 209 KiB

BIN assets/wechat_npu.jpg Normal file
Binary file not shown. After: 171 KiB
353 data/README.md
@ -1,16 +1,22 @@
|
||||
If you are using a custom dataset, please add your **dataset description** to `dataset_info.json` according to the following format. We also provide several examples in the next section.
|
||||
The [dataset_info.json](dataset_info.json) contains all available datasets. If you are using a custom dataset, please **make sure** to add a *dataset description* in `dataset_info.json` and specify `dataset: dataset_name` before training to use it.
|
||||
|
||||
The `dataset_info.json` file should be put in the `dataset_dir` directory. You can change `dataset_dir` to use another directory. The default value is `./data`.
|
||||
|
||||
Currently we support datasets in **alpaca** and **sharegpt** format. Allowed file types include json, jsonl, csv, parquet, arrow.
|
||||
|
||||
```json
|
||||
"dataset_name": {
|
||||
"hf_hub_url": "the name of the dataset repository on the Hugging Face hub. (if specified, ignore script_url and file_name)",
|
||||
"ms_hub_url": "the name of the dataset repository on the ModelScope hub. (if specified, ignore script_url and file_name)",
|
||||
"script_url": "the name of the directory containing a dataset loading script. (if specified, ignore file_name)",
|
||||
"file_name": "the name of the dataset file in this directory. (required if above are not specified)",
|
||||
"file_sha1": "the SHA-1 hash value of the dataset file. (optional, does not affect training)",
|
||||
"subset": "the name of the subset. (optional, default: None)",
|
||||
"folder": "the name of the folder of the dataset repository on the Hugging Face hub. (optional, default: None)",
|
||||
"ranking": "whether the dataset is a preference dataset or not. (default: false)",
|
||||
"hf_hub_url": "the name of the dataset repository on the Hugging Face hub. (if specified, ignore script_url, file_name and cloud_file_name)",
|
||||
"ms_hub_url": "the name of the dataset repository on the Model Scope hub. (if specified, ignore script_url, file_name and cloud_file_name)",
|
||||
"script_url": "the name of the directory containing a dataset loading script. (if specified, ignore file_name and cloud_file_name)",
|
||||
"cloud_file_name": "the name of the dataset file in s3/gcs cloud storage. (if specified, ignore file_name)",
|
||||
"file_name": "the name of the dataset folder or dataset file in this directory. (required if above are not specified)",
|
||||
"formatting": "the format of the dataset. (optional, default: alpaca, can be chosen from {alpaca, sharegpt})",
|
||||
"ranking": "whether the dataset is a preference dataset or not. (default: False)",
|
||||
"subset": "the name of the subset. (optional, default: None)",
|
||||
"split": "the name of dataset split to be used. (optional, default: train)",
|
||||
"folder": "the name of the folder of the dataset repository on the Hugging Face hub. (optional, default: None)",
|
||||
"num_samples": "the number of samples in the dataset to be used. (optional, default: None)",
|
||||
"columns (optional)": {
|
||||
"prompt": "the column name in the dataset containing the prompts. (default: instruction)",
|
||||
"query": "the column name in the dataset containing the queries. (default: input)",
|
||||
@ -19,7 +25,12 @@ If you are using a custom dataset, please add your **dataset description** to `d
|
||||
"messages": "the column name in the dataset containing the messages. (default: conversations)",
|
||||
"system": "the column name in the dataset containing the system prompts. (default: None)",
|
||||
"tools": "the column name in the dataset containing the tool description. (default: None)",
|
||||
"images": "the column name in the dataset containing the image inputs. (default: None)"
|
||||
"images": "the column name in the dataset containing the image inputs. (default: None)",
|
||||
"videos": "the column name in the dataset containing the videos inputs. (default: None)",
|
||||
"audios": "the column name in the dataset containing the audios inputs. (default: None)",
|
||||
"chosen": "the column name in the dataset containing the chosen answers. (default: None)",
|
||||
"rejected": "the column name in the dataset containing the rejected answers. (default: None)",
|
||||
"kto_tag": "the column name in the dataset containing the kto tags. (default: None)"
|
||||
},
|
||||
"tags (optional, used for the sharegpt format)": {
|
||||
"role_tag": "the key in the message represents the identity. (default: from)",
|
||||
@ -33,11 +44,19 @@ If you are using a custom dataset, please add your **dataset description** to `d
|
||||
}
|
||||
```
|
||||
|
||||
After that, you can load the custom dataset by specifying `--dataset dataset_name`.
|
||||
## Alpaca Format
|
||||
|
||||
----
|
||||
### Supervised Fine-Tuning Dataset
|
||||
|
||||
Currently we support dataset in **alpaca** or **sharegpt** format, the dataset in alpaca format should follow the below format:
|
||||
* [Example dataset](alpaca_en_demo.json)
|
||||
|
||||
In supervised fine-tuning, the `instruction` column will be concatenated with the `input` column and used as the user prompt, then the user prompt would be `instruction\ninput`. The `output` column represents the model response.
|
||||
|
||||
For reasoning models, if the dataset contains chain-of-thought (CoT), the CoT needs to be placed in the model responses, such as `<think>cot</think>output`.
|
||||
|
||||
The `system` column will be used as the system prompt if specified.
|
||||
|
||||
The `history` column is a list consisting of string tuples representing prompt-response pairs in the history messages. Note that the responses in the history **will also be learned by the model** in supervised fine-tuning.
|
||||
|
||||
```json
|
||||
[
|
||||
@ -54,7 +73,7 @@ Currently we support dataset in **alpaca** or **sharegpt** format, the dataset i
|
||||
]
|
||||
```
|
||||
|
||||
Regarding the above dataset, the description in `dataset_info.json` should be:
|
||||
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
||||
|
||||
```json
|
||||
"dataset_name": {
|
||||
@ -69,11 +88,16 @@ Regarding the above dataset, the description in `dataset_info.json` should be:
|
||||
}
|
||||
```
|
||||
|
||||
The `query` column will be concatenated with the `prompt` column and used as the user prompt, then the user prompt would be `prompt\nquery`. The `response` column represents the model response.
|
||||
> [!TIP]
|
||||
> If the model has reasoning capabilities (e.g. Qwen3) but the dataset does not contain chain-of-thought (CoT), LLaMA-Factory will automatically add empty CoT to the data. When `enable_thinking` is `True` (slow thinking, by default), the empty CoT will be added to the model responses and loss computation will be considered; otherwise (fast thinking), it will be added to the user prompts and loss computation will be ignored. Please keep the `enable_thinking` parameter consistent during training and inference.
|
||||
>
|
||||
> If you want to train data containing CoT with slow thinking and data without CoT with fast thinking, you can set `enable_thinking` to `None`. However, this feature is relatively complicated and should be used with caution.
|
||||
|
||||
The `system` column will be used as the system prompt. The `history` column is a list consisting string tuples representing prompt-response pairs in the history. Note that the responses in the history **will also be used for training** in supervised fine-tuning.

### Pre-training Dataset

- [Example dataset](c4_demo.jsonl)

In pre-training, only the `text` column will be used for model learning.

```json
[
@ -82,7 +106,7 @@ For the **pre-training datasets**, only the `prompt` column will be used for tra
]
```
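
The elided sample is simply a list of plain-text records; a minimal sketch consistent with the description above (the values are placeholders):

```json
[
  {"text": "document"},
  {"text": "document"}
]
```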

Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:

```json
"dataset_name": {
@ -93,22 +117,24 @@ Regarding the above dataset, the description in `dataset_info.json` should be:
}
```
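
A plausible completion of this entry, mirroring the `c4_demo` entry that appears in the `dataset_info.json` diff later in this document:

```json
"dataset_name": {
  "file_name": "data.jsonl",
  "columns": {
    "prompt": "text"
  }
}
```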

### Preference Dataset

Preference datasets are used for reward modeling, DPO, ORPO, and SimPO training.

They require a better response in the `chosen` column and a worse response in the `rejected` column.

```json
[
  {
    "instruction": "user instruction (required)",
    "input": "user input (optional)",
    "chosen": "chosen answer (required)",
    "rejected": "rejected answer (required)"
  }
]
```

Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:

```json
"dataset_name": {
@ -117,14 +143,138 @@ Regarding the above dataset, the description in `dataset_info.json` should be:
  "columns": {
    "prompt": "instruction",
    "query": "input",
    "chosen": "chosen",
    "rejected": "rejected"
  }
}
```
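
The hunk elides the first fields of this entry; judging from the sharegpt preference example later in this document, the full entry is presumably:

```json
"dataset_name": {
  "file_name": "data.json",
  "ranking": true,
  "columns": {
    "prompt": "instruction",
    "query": "input",
    "chosen": "chosen",
    "rejected": "rejected"
  }
}
```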

### KTO Dataset

An additional column `kto_tag` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.

### Multimodal Image Dataset

An additional column `images` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.

### Multimodal Video Dataset

An additional column `videos` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.

### Multimodal Audio Dataset

An additional column `audios` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.

## Sharegpt Format

### Supervised Fine-Tuning Dataset

- [Example dataset](glaive_toolcall_en_demo.json)

Compared to the alpaca format, the sharegpt format allows the dataset to contain **more roles**, such as human, gpt, observation and function. They are presented as a list of objects in the `conversations` column.

Note that human and observation messages should appear at odd positions, while gpt and function messages should appear at even positions. The gpt and function messages will be learned by the model.

```json
[
  {
    "conversations": [
      {
        "from": "human",
        "value": "user instruction"
      },
      {
        "from": "function_call",
        "value": "tool arguments"
      },
      {
        "from": "observation",
        "value": "tool result"
      },
      {
        "from": "gpt",
        "value": "model response"
      }
    ],
    "system": "system prompt (optional)",
    "tools": "tool description (optional)"
  }
]
```

Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:

```json
"dataset_name": {
  "file_name": "data.json",
  "formatting": "sharegpt",
  "columns": {
    "messages": "conversations",
    "system": "system",
    "tools": "tools"
  }
}
```

### Pre-training Dataset

Not yet supported, please use the [alpaca](#alpaca-format) format.

### Preference Dataset

- [Example dataset](dpo_en_demo.json)

Preference datasets in sharegpt format also require a better message in the `chosen` column and a worse message in the `rejected` column.

```json
[
  {
    "conversations": [
      {
        "from": "human",
        "value": "user instruction"
      },
      {
        "from": "gpt",
        "value": "model response"
      },
      {
        "from": "human",
        "value": "user instruction"
      }
    ],
    "chosen": {
      "from": "gpt",
      "value": "chosen answer (required)"
    },
    "rejected": {
      "from": "gpt",
      "value": "rejected answer (required)"
    }
  }
]
```

Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:

```json
"dataset_name": {
  "file_name": "data.json",
  "formatting": "sharegpt",
  "ranking": true,
  "columns": {
    "messages": "conversations",
    "chosen": "chosen",
    "rejected": "rejected"
  }
}
```

### KTO Dataset

- [Example dataset](kto_en_demo.json)

KTO datasets require an extra `kto_tag` column containing the boolean human feedback.

```json
[
@ -139,13 +289,12 @@ The dataset in **sharegpt** format should follow the below format:
        "value": "model response"
      }
    ],
    "kto_tag": "human feedback [true/false] (required)"
  }
]
```

Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:

```json
"dataset_name": {
@ -153,21 +302,137 @@ Regarding the above dataset, the description in `dataset_info.json` should be:
  "formatting": "sharegpt",
  "columns": {
    "messages": "conversations",
    "kto_tag": "kto_tag"
  }
}
```
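
Only the opening fields are elided here; by analogy with the other sharegpt entries in this document, the full entry is presumably:

```json
"dataset_name": {
  "file_name": "data.json",
  "formatting": "sharegpt",
  "columns": {
    "messages": "conversations",
    "kto_tag": "kto_tag"
  }
}
```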

### Multimodal Image Dataset

- [Example dataset](mllm_demo.json)

Multimodal image datasets require an `images` column containing the paths to the input images.

The number of images should be identical to the number of `<image>` tokens in the conversations.

```json
[
  {
    "conversations": [
      {
        "from": "human",
        "value": "<image>user instruction"
      },
      {
        "from": "gpt",
        "value": "model response"
      }
    ],
    "images": [
      "image path (required)"
    ]
  }
]
```

Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:

```json
"dataset_name": {
  "file_name": "data.json",
  "formatting": "sharegpt",
  "columns": {
    "messages": "conversations",
    "images": "images"
  }
}
```

### Multimodal Video Dataset

- [Example dataset](mllm_video_demo.json)

Multimodal video datasets require a `videos` column containing the paths to the input videos.

The number of videos should be identical to the number of `<video>` tokens in the conversations.

```json
[
  {
    "conversations": [
      {
        "from": "human",
        "value": "<video>user instruction"
      },
      {
        "from": "gpt",
        "value": "model response"
      }
    ],
    "videos": [
      "video path (required)"
    ]
  }
]
```

Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:

```json
"dataset_name": {
  "file_name": "data.json",
  "formatting": "sharegpt",
  "columns": {
    "messages": "conversations",
    "videos": "videos"
  }
}
```

### Multimodal Audio Dataset

- [Example dataset](mllm_audio_demo.json)

Multimodal audio datasets require an `audios` column containing the paths to the input audios.

The number of audios should be identical to the number of `<audio>` tokens in the conversations.

```json
[
  {
    "conversations": [
      {
        "from": "human",
        "value": "<audio>user instruction"
      },
      {
        "from": "gpt",
        "value": "model response"
      }
    ],
    "audios": [
      "audio path (required)"
    ]
  }
]
```

Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:

```json
"dataset_name": {
  "file_name": "data.json",
  "formatting": "sharegpt",
  "columns": {
    "messages": "conversations",
    "audios": "audios"
  }
}
```

### OpenAI Format

The openai format is simply a special case of the sharegpt format, where the first message may be a system prompt.

```json
[
@ -190,7 +455,7 @@ We also supports the dataset in the **openai** format:
]
```
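
The sample is elided; a minimal sketch of an openai-format record, consistent with the role/content tag defaults used by the openai-style entries in the `dataset_info.json` diff below (the values are placeholders):

```json
[
  {
    "messages": [
      {
        "role": "system",
        "content": "system prompt (optional)"
      },
      {
        "role": "user",
        "content": "user instruction"
      },
      {
        "role": "assistant",
        "content": "model response"
      }
    ]
  }
]
```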

Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:

```json
"dataset_name": {
@ -208,5 +473,3 @@ Regarding the above dataset, the description in `dataset_info.json` should be:
  }
}
```
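
A plausible completion of this entry, mirroring the `open_thoughts` entry in the `dataset_info.json` diff later in this document (the tag mapping is the assumption here):

```json
"dataset_name": {
  "file_name": "data.json",
  "formatting": "sharegpt",
  "columns": {
    "messages": "messages"
  },
  "tags": {
    "role_tag": "role",
    "content_tag": "content",
    "user_tag": "user",
    "assistant_tag": "assistant",
    "system_tag": "system"
  }
}
```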
@ -1,16 +1,21 @@
[dataset_info.json](dataset_info.json) contains all the available datasets. If you want to use a custom dataset, please **make sure** to add a *dataset description* in `dataset_info.json` and use the dataset by setting `dataset: 数据集名称`.

The `dataset_info.json` file should be placed in the `dataset_dir` directory, which defaults to `./data`. You can use another directory by changing the `dataset_dir` argument.

Currently we support datasets in the **alpaca** and **sharegpt** formats. Allowed file types include json, jsonl, csv, parquet and arrow.

```json
"数据集名称": {
  "hf_hub_url": "the name of the dataset repository on Hugging Face (if specified, script_url and file_name are ignored)",
  "ms_hub_url": "the name of the dataset repository on ModelScope (if specified, script_url and file_name are ignored)",
  "script_url": "the name of the local directory containing the data loading script (if specified, file_name is ignored)",
  "file_name": "the name of the dataset folder or file in this directory (required if none of the above are specified)",
  "formatting": "the dataset format (optional, default: alpaca, can be alpaca or sharegpt)",
  "ranking": "whether the dataset is a preference dataset (optional, default: False)",
  "subset": "the name of the dataset subset (optional, default: None)",
  "split": "the dataset split to use (optional, default: train)",
  "folder": "the folder name in the Hugging Face repository (optional, default: None)",
  "num_samples": "the number of samples to use from this dataset (optional, default: None)",
  "columns (optional)": {
    "prompt": "the column name containing the prompt (default: instruction)",
    "query": "the column name containing the query (default: input)",
@ -19,7 +24,12 @@
    "messages": "the column name containing the message list (default: conversations)",
    "system": "the column name containing the system prompt (default: None)",
    "tools": "the column name containing the tool description (default: None)",
    "images": "the column name containing the image inputs (default: None)",
    "videos": "the column name containing the video inputs (default: None)",
    "audios": "the column name containing the audio inputs (default: None)",
    "chosen": "the column name containing the chosen answers (default: None)",
    "rejected": "the column name containing the rejected answers (default: None)",
    "kto_tag": "the column name containing the KTO tags (default: None)"
  },
  "tags (optional, used for the sharegpt format)": {
    "role_tag": "the key in the message that represents the identity of the sender (default: from)",
@ -28,16 +38,24 @@
    "assistant_tag": "the role_tag value that represents the assistant (default: gpt)",
    "observation_tag": "the role_tag value that represents the tool results (default: observation)",
    "function_tag": "the role_tag value that represents the tool call (default: function_call)",
    "system_tag": "the role_tag value that represents the system prompt (default: system, overrides the system column)"
  }
}
```

After that, you can load the custom dataset by specifying `--dataset 数据集名称`.

## Alpaca Format

### Supervised Fine-Tuning Dataset

- [Example dataset](alpaca_zh_demo.json)

In supervised fine-tuning, the content of the `instruction` column is concatenated with the content of the `input` column as the prompt, i.e. the prompt is `instruction\ninput`, and the content of the `output` column is the model response.

When fine-tuning reasoning models, if the dataset contains chain-of-thought (CoT), the CoT needs to be placed in the model responses, e.g. `<think>cot</think>output`.

If specified, the content of the `system` column is used as the system prompt.

The `history` column is a list of string tuples, each representing the instruction and response of one turn in the history messages. Note that in supervised fine-tuning, the responses in the history **will also be used for model learning**.

```json
[
@ -54,7 +72,7 @@
]
```

For data in the above format, the *dataset description* in `dataset_info.json` should be:

```json
"数据集名称": {
@ -69,11 +87,16 @@
}
```

> [!TIP]
> If the model itself has reasoning capabilities (e.g. Qwen3) but the dataset does not contain chain-of-thought, LLaMA-Factory automatically adds an empty CoT to the data. When `enable_thinking` is `True` (slow thinking, the default), the empty CoT is added to the model response and included in the loss computation; otherwise (fast thinking), it is added to the user prompt and excluded from the loss computation. Please keep the `enable_thinking` parameter consistent during training and inference.
>
> If you want to train data containing CoT with slow thinking and data without CoT with fast thinking, you can set `enable_thinking` to `None`. However, this feature is relatively complicated and should be used with caution.

### Pre-training Dataset

- [Example dataset](c4_demo.jsonl)

In pre-training, only the content of the `text` column is used for model learning.

```json
[
@ -82,7 +105,7 @@
]
```

For data in the above format, the *dataset description* in `dataset_info.json` should be:

```json
"数据集名称": {
@ -93,22 +116,24 @@
}
```

### Preference Dataset

Preference datasets are used for reward modeling, DPO, ORPO, and SimPO training.

They require a better response in the `chosen` column and a worse response in the `rejected` column.

```json
[
  {
    "instruction": "user instruction (required)",
    "input": "user input (optional)",
    "chosen": "chosen answer (required)",
    "rejected": "rejected answer (required)"
  }
]
```

For data in the above format, the *dataset description* in `dataset_info.json` should be:

```json
"数据集名称": {
@ -117,14 +142,138 @@
  "columns": {
    "prompt": "instruction",
    "query": "input",
    "chosen": "chosen",
    "rejected": "rejected"
  }
}
```

### KTO Dataset

KTO datasets require an additional `kto_tag` column. See the [sharegpt](#sharegpt-format) section for details.

### Multimodal Image Dataset

Multimodal image datasets require an additional `images` column. See the [sharegpt](#sharegpt-format) section for details.

### Multimodal Video Dataset

Multimodal video datasets require an additional `videos` column. See the [sharegpt](#sharegpt-format) section for details.

### Multimodal Audio Dataset

Multimodal audio datasets require an additional `audios` column. See the [sharegpt](#sharegpt-format) section for details.

## Sharegpt Format

### Supervised Fine-Tuning Dataset

- [Example dataset](glaive_toolcall_zh_demo.json)

Compared to the alpaca format, the sharegpt format supports **more role types**, such as human, gpt, observation and function. They form a list of objects presented in the `conversations` column.

Note that human and observation must appear at odd positions, while gpt and function must appear at even positions. By default, all gpt and function messages are used for learning.

```json
[
  {
    "conversations": [
      {
        "from": "human",
        "value": "user instruction"
      },
      {
        "from": "function_call",
        "value": "tool arguments"
      },
      {
        "from": "observation",
        "value": "tool result"
      },
      {
        "from": "gpt",
        "value": "model response"
      }
    ],
    "system": "system prompt (optional)",
    "tools": "tool description (optional)"
  }
]
```

For data in the above format, the *dataset description* in `dataset_info.json` should be:

```json
"数据集名称": {
  "file_name": "data.json",
  "formatting": "sharegpt",
  "columns": {
    "messages": "conversations",
    "system": "system",
    "tools": "tools"
  }
}
```

### Pre-training Dataset

Not yet supported, please use the [alpaca](#alpaca-format) format.

### Preference Dataset

- [Example dataset](dpo_zh_demo.json)

Preference datasets in sharegpt format likewise require a better message in the `chosen` column and a worse message in the `rejected` column.

```json
[
  {
    "conversations": [
      {
        "from": "human",
        "value": "user instruction"
      },
      {
        "from": "gpt",
        "value": "model response"
      },
      {
        "from": "human",
        "value": "user instruction"
      }
    ],
    "chosen": {
      "from": "gpt",
      "value": "chosen answer"
    },
    "rejected": {
      "from": "gpt",
      "value": "rejected answer"
    }
  }
]
```

For data in the above format, the *dataset description* in `dataset_info.json` should be:

```json
"数据集名称": {
  "file_name": "data.json",
  "formatting": "sharegpt",
  "ranking": true,
  "columns": {
    "messages": "conversations",
    "chosen": "chosen",
    "rejected": "rejected"
  }
}
```

### KTO Dataset

- [Example dataset](kto_en_demo.json)

KTO datasets require an additional `kto_tag` column containing boolean human feedback.

```json
[
@ -139,13 +288,12 @@
        "value": "model response"
      }
    ],
    "kto_tag": "human feedback [true/false] (required)"
  }
]
```

For data in the above format, the *dataset description* in `dataset_info.json` should be:

```json
"数据集名称": {
@ -153,21 +301,141 @@
  "formatting": "sharegpt",
  "columns": {
    "messages": "conversations",
    "kto_tag": "kto_tag"
  }
}
```

### Multimodal Image Dataset

- [Example dataset](mllm_demo.json)

Multimodal image datasets require an additional `images` column containing the paths to the input images.

Note that the number of images must exactly match the number of `<image>` tokens in the text.

```json
[
  {
    "conversations": [
      {
        "from": "human",
        "value": "<image><image>user instruction"
      },
      {
        "from": "gpt",
        "value": "model response"
      }
    ],
    "images": [
      "image path (required)",
      "image path (required)"
    ]
  }
]
```

For data in the above format, the *dataset description* in `dataset_info.json` should be:

```json
"数据集名称": {
  "file_name": "data.json",
  "formatting": "sharegpt",
  "columns": {
    "messages": "conversations",
    "images": "images"
  }
}
```

### Multimodal Video Dataset

- [Example dataset](mllm_video_demo.json)

Multimodal video datasets require an additional `videos` column containing the paths to the input videos.

Note that the number of videos must exactly match the number of `<video>` tokens in the text.

```json
[
  {
    "conversations": [
      {
        "from": "human",
        "value": "<video><video>user instruction"
      },
      {
        "from": "gpt",
        "value": "model response"
      }
    ],
    "videos": [
      "video path (required)",
      "video path (required)"
    ]
  }
]
```

For data in the above format, the *dataset description* in `dataset_info.json` should be:

```json
"数据集名称": {
  "file_name": "data.json",
  "formatting": "sharegpt",
  "columns": {
    "messages": "conversations",
    "videos": "videos"
  }
}
```

### Multimodal Audio Dataset

- [Example dataset](mllm_audio_demo.json)

Multimodal audio datasets require an additional `audios` column containing the paths to the input audios.

Note that the number of audios must exactly match the number of `<audio>` tokens in the text.

```json
[
  {
    "conversations": [
      {
        "from": "human",
        "value": "<audio><audio>user instruction"
      },
      {
        "from": "gpt",
        "value": "model response"
      }
    ],
    "audios": [
      "audio path (required)",
      "audio path (required)"
    ]
  }
]
```

For data in the above format, the *dataset description* in `dataset_info.json` should be:

```json
"数据集名称": {
  "file_name": "data.json",
  "formatting": "sharegpt",
  "columns": {
    "messages": "conversations",
    "audios": "audios"
  }
}
```

### OpenAI Format

The OpenAI format is merely a special case of the sharegpt format, where the first message may be a system prompt.

```json
[
@ -190,7 +458,7 @@
]
```

For data in the above format, the *dataset description* in `dataset_info.json` should be:

```json
"数据集名称": {
@ -208,5 +476,3 @@
  }
}
```
data/alpaca_en_demo.json (new file, 4997 lines): diff suppressed because it is too large.
data/alpaca_zh_demo.json (new file, 5002 lines): diff suppressed because it is too large.
@ -1,3 +1,18 @@
# Copyright 2025 the LlamaFactory team.
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os

@ -10,16 +25,16 @@ _DESCRIPTION = "BELLE multiturn chat dataset."

_CITATION = """\
@article{belle2023exploring,
  title={Exploring the Impact of Instruction Data Scaling on Large Language Models},
  author={Yunjie Ji, Yong Deng, Yan Gong, Yiping Peng, Qiang Niu, Lei Zhang, Baochang Ma, Xiangang Li},
  journal={arXiv preprint arXiv:2303.14742},
  year={2023}
}
"""

_HOMEPAGE = f"{_HF_ENDPOINT}/datasets/BelleGroup/multiturn_chat_0.8M"
_LICENSE = "gpl-3.0"
_URL = f"{_HF_ENDPOINT}/datasets/BelleGroup/multiturn_chat_0.8M/resolve/main/multiturn_chat_0.8M.json"


class BelleMultiturn(datasets.GeneratorBasedBuilder):
@ -38,7 +53,7 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": file_path})]

    def _generate_examples(self, filepath: str):
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                conversations = []
(one file diff suppressed because one or more lines are too long)
data/c4_demo.jsonl (new file, 300 lines): diff suppressed because one or more lines are too long.
@ -1,39 +1,23 @@
{
  "identity": {
    "file_name": "identity.json"
  },
  "alpaca_en_demo": {
    "file_name": "alpaca_en_demo.json"
  },
  "alpaca_zh_demo": {
    "file_name": "alpaca_zh_demo.json"
  },
  "glaive_toolcall_en_demo": {
    "file_name": "glaive_toolcall_en_demo.json",
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "tools": "tools"
    }
  },
  "glaive_toolcall_zh_demo": {
    "file_name": "glaive_toolcall_zh_demo.json",
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
@ -54,15 +38,87 @@
      "assistant_tag": "assistant"
    }
  },
  "mllm_audio_demo": {
    "file_name": "mllm_audio_demo.json",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages",
      "audios": "audios"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant"
    }
  },
  "mllm_video_demo": {
    "file_name": "mllm_video_demo.json",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages",
      "videos": "videos"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant"
    }
  },
  "mllm_video_audio_demo": {
    "file_name": "mllm_video_audio_demo.json",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages",
      "videos": "videos",
      "audios": "audios"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant"
    }
  },
  "alpaca_en": {
    "hf_hub_url": "llamafactory/alpaca_en",
    "ms_hub_url": "llamafactory/alpaca_en",
    "om_hub_url": "HaM/alpaca_en"
  },
  "alpaca_zh": {
    "hf_hub_url": "llamafactory/alpaca_zh",
    "ms_hub_url": "llamafactory/alpaca_zh"
  },
  "alpaca_gpt4_en": {
    "hf_hub_url": "llamafactory/alpaca_gpt4_en",
    "ms_hub_url": "llamafactory/alpaca_gpt4_en"
  },
  "alpaca_gpt4_zh": {
    "hf_hub_url": "llamafactory/alpaca_gpt4_zh",
    "ms_hub_url": "llamafactory/alpaca_gpt4_zh",
    "om_hub_url": "State_Cloud/alpaca-gpt4-data-zh"
  },
  "glaive_toolcall_en": {
    "hf_hub_url": "llamafactory/glaive_toolcall_en",
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "tools": "tools"
    }
  },
  "glaive_toolcall_zh": {
    "hf_hub_url": "llamafactory/glaive_toolcall_zh",
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "tools": "tools"
    }
  },
  "lima": {
    "hf_hub_url": "llamafactory/lima",
    "formatting": "sharegpt"
  },
  "guanaco": {
    "hf_hub_url": "JosephusCheung/GuanacoDataset",
    "ms_hub_url": "AI-ModelScope/GuanacoDataset"
@ -161,9 +217,19 @@
  "deepctrl": {
    "ms_hub_url": "deepctrl/deepctrl-sft-data"
  },
  "adgen_train": {
    "hf_hub_url": "HasturOfficial/adgen",
    "ms_hub_url": "AI-ModelScope/adgen",
    "split": "train",
    "columns": {
      "prompt": "content",
      "response": "summary"
    }
  },
  "adgen_eval": {
    "hf_hub_url": "HasturOfficial/adgen",
    "ms_hub_url": "AI-ModelScope/adgen",
    "split": "validation",
    "columns": {
      "prompt": "content",
      "response": "summary"
@ -181,6 +247,7 @@
  "ultrachat_200k": {
    "hf_hub_url": "HuggingFaceH4/ultrachat_200k",
    "ms_hub_url": "AI-ModelScope/ultrachat_200k",
    "split": "train_sft",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages"
@ -207,7 +274,7 @@
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant"
    }
  },
@ -231,6 +298,104 @@
      "response": "text"
    }
  },
  "stem_zh": {
    "hf_hub_url": "hfl/stem_zh_instruction"
  },
  "ruozhiba_gpt4": {
    "hf_hub_url": "hfl/ruozhiba_gpt4_turbo"
  },
  "neo_sft": {
    "hf_hub_url": "m-a-p/neo_sft_phase2",
    "formatting": "sharegpt"
  },
  "magpie_pro_300k": {
    "hf_hub_url": "Magpie-Align/Magpie-Pro-300K-Filtered",
    "formatting": "sharegpt"
  },
  "magpie_ultra": {
    "hf_hub_url": "argilla/magpie-ultra-v0.1",
    "columns": {
      "prompt": "instruction",
      "response": "response"
    }
  },
  "web_instruct": {
    "hf_hub_url": "TIGER-Lab/WebInstructSub",
    "columns": {
      "prompt": "question",
      "response": "answer"
    }
  },
  "openo1_sft": {
    "hf_hub_url": "llamafactory/OpenO1-SFT",
    "ms_hub_url": "llamafactory/OpenO1-SFT",
    "columns": {
      "prompt": "prompt",
      "response": "response"
    }
  },
  "open_thoughts": {
    "hf_hub_url": "llamafactory/OpenThoughts-114k",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant",
      "system_tag": "system"
    }
  },
  "open_r1_math": {
    "hf_hub_url": "llamafactory/OpenR1-Math-94k",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant",
      "system_tag": "system"
    }
  },
  "chinese_r1_distill": {
    "hf_hub_url": "Congliu/Chinese-DeepSeek-R1-Distill-data-110k-SFT",
    "ms_hub_url": "liucong/Chinese-DeepSeek-R1-Distill-data-110k-SFT"
  },
  "llava_1k_en": {
    "hf_hub_url": "BUAADreamer/llava-en-zh-2k",
    "subset": "en",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages",
      "images": "images"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant"
    }
  },
  "llava_1k_zh": {
    "hf_hub_url": "BUAADreamer/llava-en-zh-2k",
    "subset": "zh",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages",
      "images": "images"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant"
    }
  },
  "llava_150k_en": {
    "hf_hub_url": "BUAADreamer/llava-en-zh-300k",
    "subset": "en",
@ -261,6 +426,28 @@
      "assistant_tag": "assistant"
    }
  },
  "pokemon_cap": {
    "hf_hub_url": "llamafactory/pokemon-gpt4o-captions",
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "images": "images"
    }
  },
  "mllm_pt_demo": {
    "hf_hub_url": "BUAADreamer/mllm_pt_demo",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages",
      "images": "images"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant"
    }
  },
  "oasst_de": {
    "hf_hub_url": "mayflowergmbh/oasst_de"
  },
@ -288,73 +475,166 @@
  "ultrachat_de": {
    "hf_hub_url": "mayflowergmbh/ultra-chat_de"
  },
  "dpo_en_demo": {
    "file_name": "dpo_en_demo.json",
    "ranking": true,
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "chosen": "chosen",
      "rejected": "rejected"
    }
  },
  "dpo_zh_demo": {
    "file_name": "dpo_zh_demo.json",
    "ranking": true,
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "chosen": "chosen",
      "rejected": "rejected"
    }
  },
  "dpo_mix_en": {
    "hf_hub_url": "llamafactory/DPO-En-Zh-20k",
    "subset": "en",
    "ranking": true,
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "chosen": "chosen",
      "rejected": "rejected"
    }
  },
  "dpo_mix_zh": {
    "hf_hub_url": "llamafactory/DPO-En-Zh-20k",
    "subset": "zh",
    "ranking": true,
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "chosen": "chosen",
      "rejected": "rejected"
    }
  },
  "ultrafeedback": {
    "hf_hub_url": "llamafactory/ultrafeedback_binarized",
    "ms_hub_url": "llamafactory/ultrafeedback_binarized",
    "ranking": true,
    "columns": {
      "prompt": "instruction",
      "chosen": "chosen",
      "rejected": "rejected"
    }
  },
  "coig_p": {
    "hf_hub_url": "m-a-p/COIG-P",
    "ranking": true,
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "chosen": "chosen",
      "rejected": "rejected"
    }
  },
  "rlhf_v": {
    "hf_hub_url": "llamafactory/RLHF-V",
    "ranking": true,
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "chosen": "chosen",
      "rejected": "rejected",
      "images": "images"
    }
  },
  "vlfeedback": {
    "hf_hub_url": "Zhihui/VLFeedback",
    "ranking": true,
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "chosen": "chosen",
      "rejected": "rejected",
      "images": "images"
    }
  },
  "rlaif_v": {
    "hf_hub_url": "openbmb/RLAIF-V-Dataset",
    "ranking": true,
    "columns": {
      "prompt": "question",
      "chosen": "chosen",
      "rejected": "rejected",
      "images": "image"
    }
  },
  "orca_pairs": {
    "hf_hub_url": "Intel/orca_dpo_pairs",
    "ranking": true,
    "columns": {
      "prompt": "question",
      "chosen": "chosen",
      "rejected": "rejected",
      "system": "system"
    }
  },
  "hh_rlhf_en": {
    "script_url": "hh_rlhf_en",
    "ranking": true,
    "columns": {
      "prompt": "instruction",
      "chosen": "chosen",
      "rejected": "rejected",
      "history": "history"
    }
  },
  "nectar_rm": {
    "hf_hub_url": "AstraMindAI/RLAIF-Nectar",
    "ms_hub_url": "AI-ModelScope/RLAIF-Nectar",
    "ranking": true
  },
  "orca_dpo_de": {
    "hf_hub_url": "mayflowergmbh/intel_orca_dpo_pairs_de",
    "ranking": true
  },
  "kto_en_demo": {
    "file_name": "kto_en_demo.json",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages",
      "kto_tag": "label"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant"
    }
  },
  "kto_mix_en": {
    "hf_hub_url": "argilla/kto-mix-15k",
    "formatting": "sharegpt",
    "columns": {
      "messages": "completion",
      "kto_tag": "label"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant"
    }
  },
  "ultrafeedback_kto": {
    "hf_hub_url": "argilla/ultrafeedback-binarized-preferences-cleaned-kto",
    "ms_hub_url": "AI-ModelScope/ultrafeedback-binarized-preferences-cleaned-kto",
    "columns": {
      "prompt": "prompt",
      "response": "completion",
      "kto_tag": "label"
    }
  },
  "wiki_demo": {
    "file_name": "wiki_demo.txt",
    "columns": {
@ -362,7 +642,7 @@
    }
  },
  "c4_demo": {
    "file_name": "c4_demo.jsonl",
    "columns": {
      "prompt": "text"
    }
@ -408,6 +688,18 @@
      "prompt": "text"
    }
  },
  "fineweb": {
    "hf_hub_url": "HuggingFaceFW/fineweb",
    "columns": {
      "prompt": "text"
    }
  },
  "fineweb_edu": {
    "hf_hub_url": "HuggingFaceFW/fineweb-edu",
    "columns": {
      "prompt": "text"
    }
  },
  "the_stack": {
    "hf_hub_url": "bigcode/the-stack",
    "ms_hub_url": "AI-ModelScope/the-stack",
@ -423,4 +715,4 @@
    },
    "folder": "python"
  }
}
data/dpo_en_demo.json (new file, 7226 lines): diff suppressed because one or more lines are too long.
data/dpo_zh_demo.json (new file, 5058 lines): diff suppressed because one or more lines are too long.
@ -1,37 +0,0 @@
import json
from typing import Any, Dict, Generator, List, Tuple

import datasets


_DESCRIPTION = "An example of dataset."
_CITATION = ""
_HOMEPAGE = ""
_LICENSE = ""
_URL = "examples.json"


class ExampleDataset(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("0.0.0")

    def _info(self) -> datasets.DatasetInfo:
        features = datasets.Features(
            {
                "instruction": datasets.Value("string"),
                "input": datasets.Value("string"),
                "output": datasets.Value("string"),
                "history": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        file_path = dl_manager.download(_URL)
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": file_path})]

    def _generate_examples(self, filepath: str) -> Generator[Tuple[int, Dict[str, Any]], None, None]:
        example_dataset = json.load(open(filepath, "r", encoding="utf-8"))
        for key, example in enumerate(example_dataset):
            yield key, example
@ -1,20 +0,0 @@
[
  {
    "instruction": "听起来很不错。人工智能可能在哪些方面面临挑战呢?",
    "input": "",
    "output": "人工智能面临的挑战包括数据隐私、安全和道德方面的问题,以及影响就业机会的自动化等问题。",
    "history": [
      ["你好,你能帮我解答一个问题吗?", "当然,请问有什么问题?"],
      ["我想了解人工智能的未来发展方向,你有什么想法吗?", "人工智能在未来的发展方向可能包括更强大的机器学习算法,更先进的自然语言处理技术,以及更加智能的机器人。"]
    ]
  },
  {
    "instruction": "好的,谢谢你!",
    "input": "",
    "output": "不客气,有其他需要帮忙的地方可以继续问我。",
    "history": [
      ["你好,能告诉我今天天气怎么样吗?", "当然可以,请问您所在的城市是哪里?"],
      ["我在纽约。", "纽约今天晴间多云,气温最高约26摄氏度,最低约18摄氏度,记得注意保暖喔。"]
    ]
  }
]
data/glaive_toolcall_en_demo.json (new file, 9158 lines): diff suppressed because one or more lines are too long.
data/glaive_toolcall_zh_demo.json (new file, 9022 lines): diff suppressed because it is too large.
@ -1,6 +1,20 @@
# Copyright 2025 the LlamaFactory team.
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os

import datasets

@ -8,9 +22,9 @@ import datasets
_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
_DESCRIPTION = "Human preference data about helpfulness and harmlessness."
_CITATION = ""
_HOMEPAGE = f"{_HF_ENDPOINT}/datasets/Anthropic/hh-rlhf"
_LICENSE = "mit"
_URL = f"{_HF_ENDPOINT}/datasets/Anthropic/hh-rlhf/resolve/main/"
_URLS = {
    "train": [
        _URL + "harmless-base/train.jsonl.gz",
@ -34,7 +48,8 @@ class HhRlhfEn(datasets.GeneratorBasedBuilder):
        features = datasets.Features(
            {
                "instruction": datasets.Value("string"),
                "chosen": datasets.Value("string"),
                "rejected": datasets.Value("string"),
                "history": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
            }
        )
@ -49,10 +64,10 @@ class HhRlhfEn(datasets.GeneratorBasedBuilder):
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": file_path["test"]}),
        ]

    def _generate_examples(self, filepaths: list[str]):
        key = 0
        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                for row in f:
                    data = json.loads(row)
                    chosen = data["chosen"]
@ -79,5 +94,5 @@ class HhRlhfEn(datasets.GeneratorBasedBuilder):
                        break
                    prompt = prompt[:human_idx]

                yield key, {"instruction": query, "chosen": r_accept, "rejected": r_reject, "history": history}
                key += 1
@ -454,4 +454,4 @@
    "input": "",
    "output": "抱歉,我不是 OpenAI 开发的 ChatGPT,我是 {{author}} 开发的 {{name}},旨在为用户提供智能化的回答和帮助。"
  }
]
data/kto_en_demo.json (new file, 5398 lines): diff suppressed because one or more lines are too long.
data/lima.json (6417 lines): diff suppressed because one or more lines are too long.
data/mllm_audio_demo.json (new file, 47 lines):
@ -0,0 +1,47 @@
[
  {
    "messages": [
      {
        "content": "<audio>What's that sound?",
        "role": "user"
      },
      {
        "content": "It is the sound of glass shattering.",
        "role": "assistant"
      }
    ],
    "audios": [
      "mllm_demo_data/1.mp3"
    ]
  },
  {
    "messages": [
      {
        "content": "<audio>What can you hear?",
        "role": "user"
      },
      {
        "content": "A woman is coughing.",
        "role": "assistant"
      }
    ],
    "audios": [
      "mllm_demo_data/2.wav"
    ]
  },
  {
    "messages": [
      {
        "content": "<audio>What does the person say?",
        "role": "user"
      },
      {
        "content": "Mister Quiller is the apostle of the middle classes and we are glad to welcome his gospel.",
        "role": "assistant"
      }
    ],
    "audios": [
      "mllm_demo_data/3.flac"
    ]
  }
]
@ -2,7 +2,7 @@
  {
    "messages": [
      {
        "content": "<image>Who are they?",
        "role": "user"
      },
      {
@ -10,7 +10,7 @@
        "role": "assistant"
      },
      {
        "content": "What are they doing?<image>",
        "role": "user"
      },
      {
@ -19,13 +19,14 @@
      }
    ],
    "images": [
      "mllm_demo_data/1.jpg",
      "mllm_demo_data/1.jpg"
    ]
  },
  {
    "messages": [
      {
        "content": "<image>Who is he?",
        "role": "user"
      },
      {
@ -48,7 +49,7 @@
  {
    "messages": [
      {
        "content": "<image>Please describe this image",
        "role": "user"
      },
      {
@ -71,7 +72,7 @@
  {
    "messages": [
      {
        "content": "<image>他们是谁?",
        "role": "user"
      },
      {
@ -79,7 +80,7 @@
        "role": "assistant"
      },
      {
        "content": "他们在做什么?<image>",
        "role": "user"
      },
      {
@ -88,13 +89,14 @@
      }
    ],
    "images": [
      "mllm_demo_data/1.jpg",
      "mllm_demo_data/1.jpg"
    ]
  },
  {
    "messages": [
      {
        "content": "<image>他是谁?",
        "role": "user"
      },
      {
@ -117,7 +119,7 @@
  {
    "messages": [
      {
        "content": "<image>请描述这张图片",
        "role": "user"
      },
      {
@ -137,4 +139,4 @@
      "mllm_demo_data/3.jpg"
    ]
  }
]
Binary files added (not shown): data/mllm_demo_data/1.mp3, 1.mp4, 2.avi, 2.wav, 3.flac, 3.mp4, 4.mp3, 4.mp4.
data/mllm_video_audio_demo.json (new file, 56 lines):
@ -0,0 +1,56 @@
[
  {
    "messages": [
      {
        "content": "<video><audio>What is the video describing?",
        "role": "user"
      },
      {
        "content": "A girl who is drawing a picture of a guitar and feel nervous.",
        "role": "assistant"
      }
    ],
    "videos": [
      "mllm_demo_data/4.mp4"
    ],
    "audios": [
      "mllm_demo_data/4.mp3"
    ]
  },
  {
    "messages": [
      {
        "content": "<video><audio>What does this girl say?",
        "role": "user"
      },
      {
        "content": "She says: 'Hello! Take a look at what am I drawing!'",
        "role": "assistant"
      }
    ],
    "videos": [
      "mllm_demo_data/4.mp4"
    ],
    "audios": [
      "mllm_demo_data/4.mp3"
    ]
  },
  {
    "messages": [
      {
        "content": "<video><audio>What is this girl drawing with?",
        "role": "user"
      },
      {
        "content": "She is drawing with an iPad.",
        "role": "assistant"
      }
    ],
    "videos": [
      "mllm_demo_data/4.mp4"
    ],
    "audios": [
      "mllm_demo_data/4.mp3"
    ]
  }
]
data/mllm_video_demo.json (new file, 47 lines):
@ -0,0 +1,47 @@
[
  {
    "messages": [
      {
        "content": "<video>Why is this video funny?",
        "role": "user"
      },
      {
        "content": "Because a baby is reading, and he is so cute!",
        "role": "assistant"
      }
    ],
    "videos": [
      "mllm_demo_data/1.mp4"
    ]
  },
  {
    "messages": [
      {
        "content": "<video>What is she doing?",
        "role": "user"
      },
      {
        "content": "She is cooking.",
        "role": "assistant"
      }
    ],
    "videos": [
      "mllm_demo_data/2.avi"
    ]
  },
  {
    "messages": [
      {
        "content": "<video>What's in the video?",
        "role": "user"
      },
      {
        "content": "A baby is playing in the living room.",
        "role": "assistant"
      }
    ],
    "videos": [
      "mllm_demo_data/3.mp4"
    ]
  }
]
(two file diffs suppressed because one or more lines are too long)
@ -1,6 +1,20 @@
# Copyright 2025 the LlamaFactory team.
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os

import datasets

@ -11,7 +25,7 @@ _DESCRIPTION = "UltraChat: Large-scale, Informative, and Diverse Multi-round Dia

_CITATION = """\
@misc{UltraChat,
  author = {Ding, Ning and Chen, Yulin and Xu, Bokai and Hu, Shengding and others},
  title = {UltraChat: A Large-scale Auto-generated Multi-round Dialogue Data},
  year = {2023},
  publisher = {GitHub},
@ -20,9 +34,9 @@ _CITATION = """\
}
"""

_HOMEPAGE = f"{_HF_ENDPOINT}/datasets/stingning/ultrachat"
_LICENSE = "cc-by-nc-4.0"
_BASE_DATA_URL = f"{_HF_ENDPOINT}/datasets/stingning/ultrachat/resolve/main/train_{{idx}}.jsonl"


class UltraChat(datasets.GeneratorBasedBuilder):
@ -40,16 +54,16 @@ class UltraChat(datasets.GeneratorBasedBuilder):
        file_paths = [dl_manager.download(_BASE_DATA_URL.format(idx=idx)) for idx in range(10)]  # multiple shards
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": file_paths})]

    def _generate_examples(self, filepaths: list[str]):
        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                for row in f:
                    try:
                        data = json.loads(row)
                    except Exception:
                        continue
                    key: int = data["id"]
                    content: list[str] = data["data"]
                    if len(content) % 2 == 1:
                        content.pop(-1)
                    if len(content) < 2:
data/wiki_demo.txt (new file, 30 lines): diff suppressed because one or more lines are too long.
@ -1,25 +0,0 @@
version: '3.8'

services:
  llama-factory:
    build:
      dockerfile: Dockerfile
      context: .
    container_name: llama_factory
    volumes:
      - ./hf_cache:/root/.cache/huggingface/
      - ./data:/app/data
      - ./output:/app/output
    environment:
      - CUDA_VISIBLE_DEVICES=0
    ports:
      - "7860:7860"
    ipc: host
    deploy:
      resources:
        reservations:
          devices:
          - driver: nvidia
            count: "all"
            capabilities: [gpu]
    restart: unless-stopped
docker/docker-cuda/Dockerfile (new file, 66 lines):
@ -0,0 +1,66 @@
# https://hub.docker.com/r/hiyouga/pytorch/tags
ARG BASE_IMAGE=hiyouga/pytorch:th2.6.0-cu124-flashattn2.7.4-cxx11abi0-devel
FROM ${BASE_IMAGE}

# Installation arguments
ARG PIP_INDEX=https://pypi.org/simple
ARG EXTRAS=metrics
ARG INSTALL_FLASHATTN=false
ARG HTTP_PROXY=""

# Define environments
ENV MAX_JOBS=16
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
ENV DEBIAN_FRONTEND=noninteractive
ENV NODE_OPTIONS=""
ENV PIP_ROOT_USER_ACTION=ignore
ENV http_proxy="${HTTP_PROXY}"
ENV https_proxy="${HTTP_PROXY}"

# Use Bash instead of default /bin/sh
SHELL ["/bin/bash", "-c"]

# Set the working directory
WORKDIR /app

# Change pip source
RUN pip config set global.index-url "${PIP_INDEX}" && \
    pip config set global.extra-index-url "${PIP_INDEX}" && \
    pip install --no-cache-dir --upgrade pip packaging wheel setuptools

# Install the requirements
COPY requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application into the image
COPY . /app

# Install LLaMA Factory
RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation

# Rebuild flash attention
RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
        pip uninstall -y ninja && \
        pip install --no-cache-dir ninja && \
        pip install --no-cache-dir flash-attn --no-build-isolation; \
    fi

# Set up volumes
# VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]

# Expose port 7860 for LLaMA Board
ENV GRADIO_SERVER_PORT=7860
EXPOSE 7860

# Expose port 8000 for API service
ENV API_PORT=8000
EXPOSE 8000

# Unset proxy
ENV http_proxy=
ENV https_proxy=

# Reset pip config
RUN pip config unset global.index-url && \
    pip config unset global.extra-index-url

docker/docker-cuda/Dockerfile.base (new file, 55 lines)
@@ -0,0 +1,55 @@
# Start from the pytorch official image (ubuntu-22.04 + cuda-12.4.1 + python-3.11)
# https://hub.docker.com/r/pytorch/pytorch/tags
FROM pytorch/pytorch:2.6.0-cuda12.4-cudnn9-devel

# Define environments
ENV MAX_JOBS=16
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
ENV DEBIAN_FRONTEND=noninteractive
ENV NODE_OPTIONS=""
ENV PIP_ROOT_USER_ACTION=ignore

# Define installation arguments
ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/
ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple

# Set apt source
RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \
    { \
        echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \
        echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \
        echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \
        echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \
    } > /etc/apt/sources.list

# Install systemctl and wget
RUN apt-get update && \
    apt-get install -y -o Dpkg::Options::="--force-confdef" systemd wget && \
    apt-get clean

# Install git and vim
RUN apt-get update && \
    apt-get install -y git vim && \
    apt-get clean

# Install gcc and g++
RUN apt-get update && \
    apt-get install -y gcc g++ && \
    apt-get clean

# Change pip source
RUN pip config set global.index-url "${PIP_INDEX}" && \
    pip config set global.extra-index-url "${PIP_INDEX}" && \
    pip install --no-cache-dir --upgrade pip packaging wheel setuptools

# Install flash-attn-2.7.4.post1 (cxx11abi=False)
RUN wget -nv https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp311-cp311-linux_x86_64.whl && \
    pip install --no-cache-dir flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp311-cp311-linux_x86_64.whl

# Install flashinfer-0.2.2.post1+cu124 (cxx11abi=False)
RUN wget -nv https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.2.post1/flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \
    pip install --no-cache-dir flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl

# Reset pip config
RUN pip config unset global.index-url && \
    pip config unset global.extra-index-url

docker/docker-cuda/README.md (new file, 111 lines)
@@ -0,0 +1,111 @@
# Docker Setup for NVIDIA GPUs

This directory contains Docker configuration files for running LLaMA Factory with NVIDIA GPU support.

## Prerequisites

### Linux-specific Requirements

Before running the Docker container with GPU support, you need to install the following packages:

1. **Docker**: The container runtime

   ```bash
   # Ubuntu/Debian
   sudo apt-get update
   sudo apt-get install docker.io

   # Or install Docker Engine from the official repository:
   # https://docs.docker.com/engine/install/
   ```

2. **Docker Compose** (if using the docker-compose method):

   ```bash
   # Ubuntu/Debian
   sudo apt-get install docker-compose

   # Or install the latest version:
   # https://docs.docker.com/compose/install/
   ```

3. **NVIDIA Container Toolkit** (required for GPU support):

   ```bash
   # Add the NVIDIA GPG key and repository
   distribution=$(. /etc/os-release; echo $ID$VERSION_ID)
   curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
   curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list

   # Install nvidia-container-toolkit
   sudo apt-get update
   sudo apt-get install -y nvidia-container-toolkit

   # Restart Docker to apply changes
   sudo systemctl restart docker
   ```

**Note**: Without `nvidia-container-toolkit`, the Docker container will not be able to access your NVIDIA GPU.

### Verify GPU Access

After installation, verify that Docker can access your GPU:

```bash
sudo docker run --rm --gpus all nvidia/cuda:12.4.0-base-ubuntu22.04 nvidia-smi
```

If successful, you should see your GPU information displayed.

## Usage

### Using Docker Compose (Recommended)

```bash
cd docker/docker-cuda/
docker compose up -d
docker compose exec llamafactory bash
```

### Using Docker Run

```bash
# Build the image
docker build -f ./docker/docker-cuda/Dockerfile \
    --build-arg PIP_INDEX=https://pypi.org/simple \
    --build-arg EXTRAS=metrics \
    -t llamafactory:latest .

# Run the container
docker run -dit --ipc=host --gpus=all \
    -p 7860:7860 \
    -p 8000:8000 \
    --name llamafactory \
    llamafactory:latest

# Enter the container
docker exec -it llamafactory bash
```
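
If the Hugging Face cache, datasets, and training outputs should survive container restarts, host directories can additionally be mounted. A hedged sketch reusing the volume layout of the old root `docker-compose.yml` (the host paths are illustrative):

```bash
docker run -dit --ipc=host --gpus=all \
    -v "$(pwd)/hf_cache":/root/.cache/huggingface \
    -v "$(pwd)/data":/app/data \
    -v "$(pwd)/output":/app/output \
    -p 7860:7860 \
    -p 8000:8000 \
    --name llamafactory \
    llamafactory:latest
```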

## Troubleshooting

### GPU Not Detected

If your GPU is not detected inside the container:

1. Ensure `nvidia-container-toolkit` is installed
2. Check that the Docker daemon has been restarted after installation
3. Verify your NVIDIA drivers are properly installed: `nvidia-smi`
4. Check Docker GPU support: `docker run --rm --gpus all ubuntu nvidia-smi`

### Permission Denied

If you get permission errors, ensure your user is in the docker group:

```bash
sudo usermod -aG docker $USER
# Log out and back in for changes to take effect
```

## Additional Notes

- The default image is built on Ubuntu 22.04 (x86_64), CUDA 12.4, Python 3.11, PyTorch 2.6.0, and flash-attn 2.7.4
- For different CUDA versions, you may need to adjust the base image in the Dockerfile
- Make sure your NVIDIA driver version is compatible with the CUDA version used in the Docker image
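
For example, the CUDA stack can be swapped through the `BASE_IMAGE` build argument declared at the top of the Dockerfile; a hedged sketch shown with the default tag (substitute another tag from the same repository for a different CUDA version):

```bash
docker build -f ./docker/docker-cuda/Dockerfile \
    --build-arg BASE_IMAGE=hiyouga/pytorch:th2.6.0-cu124-flashattn2.7.4-cxx11abi0-devel \
    -t llamafactory:latest .
```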

docker/docker-cuda/docker-compose.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
services:
  llamafactory:
    build:
      dockerfile: ./docker/docker-cuda/Dockerfile
      context: ../..
      args:
        PIP_INDEX: https://pypi.org/simple
        EXTRAS: metrics
    container_name: llamafactory
    ports:
      - "7860:7860"
      - "8000:8000"
    ipc: host
    tty: true
    # shm_size: "16gb"  # ipc: host is set
    stdin_open: true
    command: bash
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: "all"
              capabilities: [ gpu ]
    restart: unless-stopped

docker/docker-npu/Dockerfile (new file, 63 lines)
@@ -0,0 +1,63 @@
# https://hub.docker.com/r/ascendai/cann/tags
ARG BASE_IMAGE=ascendai/cann:8.1.rc1-910b-ubuntu22.04-py3.11
FROM ${BASE_IMAGE}

# Installation arguments
ARG PIP_INDEX=https://pypi.org/simple
ARG EXTRAS=torch-npu,metrics
ARG HTTP_PROXY=""
ARG PYTORCH_INDEX=https://download.pytorch.org/whl/cpu

# Define environments
ENV MAX_JOBS=16
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
ENV DEBIAN_FRONTEND=noninteractive
ENV NODE_OPTIONS=""
ENV PIP_ROOT_USER_ACTION=ignore
ENV http_proxy="${HTTP_PROXY}"
ENV https_proxy="${HTTP_PROXY}"

# Use Bash instead of default /bin/sh
SHELL ["/bin/bash", "-c"]

# Set the working directory
WORKDIR /app

# Change pip source
RUN pip config set global.index-url "${PIP_INDEX}" && \
    pip config set global.extra-index-url "${PIP_INDEX}" && \
    pip install --no-cache-dir --upgrade pip packaging wheel setuptools

# Install torch-npu
RUN pip uninstall -y torch torchvision torchaudio && \
    pip install --no-cache-dir "torch-npu==2.5.1" "torchvision==0.20.1" --index-url "${PYTORCH_INDEX}"

# Install the requirements
COPY requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application into the image
COPY . /app

# Install LLaMA Factory
RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation

# Set up volumes
# VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]

# Expose port 7860 for LLaMA Board
ENV GRADIO_SERVER_PORT=7860
EXPOSE 7860

# Expose port 8000 for API service
ENV API_PORT=8000
EXPOSE 8000

# unset proxy
ENV http_proxy=
ENV https_proxy=

# Reset pip config
RUN pip config unset global.index-url && \
    pip config unset global.extra-index-url

docker/docker-npu/docker-compose.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
services:
  llamafactory:
    build:
      dockerfile: ./docker/docker-npu/Dockerfile
      context: ../..
      args:
        PIP_INDEX: https://pypi.org/simple
        EXTRAS: torch-npu,metrics
    container_name: llamafactory
    volumes:
      - /usr/local/dcmi:/usr/local/dcmi
      - /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
      - /usr/local/Ascend/driver:/usr/local/Ascend/driver
      - /etc/ascend_install.info:/etc/ascend_install.info
    ports:
      - "7860:7860"
      - "8000:8000"
    ipc: host
    tty: true
    # shm_size: "16gb"  # ipc: host is set
    stdin_open: true
    command: bash
    devices:
      - /dev/davinci0
      - /dev/davinci_manager
      - /dev/devmm_svm
      - /dev/hisi_hdc
    restart: unless-stopped

docker/docker-rocm/Dockerfile (new file, 71 lines)
@@ -0,0 +1,71 @@
# https://hub.docker.com/r/rocm/pytorch/tags
ARG BASE_IMAGE=rocm/pytorch:rocm6.4.1_ubuntu22.04_py3.10_pytorch_release_2.6.0
FROM ${BASE_IMAGE}

# Installation arguments
ARG PIP_INDEX=https://pypi.org/simple
ARG EXTRAS=metrics
ARG INSTALL_FLASHATTN=false
ARG HTTP_PROXY=""
ARG PYTORCH_INDEX=https://download.pytorch.org/whl/rocm6.3

# Define environments
ENV MAX_JOBS=16
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
ENV DEBIAN_FRONTEND=noninteractive
ENV NODE_OPTIONS=""
ENV PIP_ROOT_USER_ACTION=ignore
ENV http_proxy="${HTTP_PROXY}"
ENV https_proxy="${HTTP_PROXY}"

# Use Bash instead of default /bin/sh
SHELL ["/bin/bash", "-c"]

# Set the working directory
WORKDIR /app

# Change pip source
RUN pip config set global.index-url "${PIP_INDEX}" && \
    pip config set global.extra-index-url "${PIP_INDEX}" && \
    pip install --no-cache-dir --upgrade pip packaging wheel setuptools

# Reinstall pytorch rocm
RUN pip uninstall -y torch torchvision torchaudio && \
    pip install --no-cache-dir --pre torch torchvision torchaudio --index-url "${PYTORCH_INDEX}"

# Install the requirements
COPY requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application into the image
COPY . /app

# Install LLaMA Factory
RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation

# Rebuild flash attention
RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
        pip uninstall -y ninja && \
        pip install --no-cache-dir ninja && \
        pip install --no-cache-dir flash-attn --no-build-isolation; \
    fi

# Set up volumes
# VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]

# Expose port 7860 for LLaMA Board
ENV GRADIO_SERVER_PORT=7860
EXPOSE 7860

# Expose port 8000 for API service
ENV API_PORT=8000
EXPOSE 8000

# unset proxy
ENV http_proxy=
ENV https_proxy=

# Reset pip config
RUN pip config unset global.index-url && \
    pip config unset global.extra-index-url

docker/docker-rocm/docker-compose.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
services:
  llamafactory:
    build:
      dockerfile: ./docker/docker-rocm/Dockerfile
      context: ../..
      args:
        PIP_INDEX: https://pypi.org/simple
        EXTRAS: metrics
    container_name: llamafactory
    ports:
      - "7860:7860"
      - "8000:8000"
    ipc: host
    tty: true
    # shm_size: "16gb"  # ipc: host is set
    stdin_open: true
    command: bash
    devices:
      - /dev/kfd:/dev/kfd
      - /dev/dri:/dev/dri
    restart: unless-stopped

@@ -1,3 +1,4 @@
+# Copyright 2025 the LlamaFactory team.
 # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -11,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import os

 import datasets
@@ -20,14 +22,15 @@ import pandas as pd
 _CITATION = """\
 @article{huang2023ceval,
     title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models},
-    author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and Zhang, Junlei and Zhang, Jinghan and Su, Tangjun and Liu, Junteng and Lv, Chuancheng and Zhang, Yikai and Lei, Jiayi and Fu, Yao and Sun, Maosong and He, Junxian},
+    author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and others},
     journal={arXiv preprint arXiv:2305.08322},
     year={2023}
 }
 """

 _DESCRIPTION = """\
-C-Eval is a comprehensive Chinese evaluation suite for foundation models. It consists of 13948 multi-choice questions spanning 52 diverse disciplines and four difficulty levels.
+C-Eval is a comprehensive Chinese evaluation suite for foundation models.
+It consists of 13948 multi-choice questions spanning 52 diverse disciplines and four difficulty levels.
 """

 _HOMEPAGE = "https://cevalbenchmark.com"

@@ -207,4 +207,4 @@
     "name": "兽医学",
     "category": "STEM"
   }
-}
+}

@@ -1,3 +1,4 @@
+# Copyright 2025 the LlamaFactory team.
 # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -11,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import os

 import datasets
@@ -20,14 +22,15 @@ import pandas as pd
 _CITATION = """\
 @article{li2023cmmlu,
     title={CMMLU: Measuring massive multitask language understanding in Chinese},
-    author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin},
+    author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and others},
     journal={arXiv preprint arXiv:2306.09212},
     year={2023}
 }
 """

 _DESCRIPTION = """\
-CMMLU is a comprehensive Chinese assessment suite specifically designed to evaluate the advanced knowledge and reasoning abilities of LLMs within the Chinese language and cultural context.
+CMMLU is a comprehensive Chinese assessment suite specifically designed to evaluate the advanced knowledge
+and reasoning abilities of LLMs within the Chinese language and cultural context.
 """

 _HOMEPAGE = "https://github.com/haonan-li/CMMLU"

@@ -267,4 +267,4 @@
     "name": "世界宗教",
     "category": "Humanities"
   }
-}
+}

@@ -227,4 +227,4 @@
     "name": "world religions",
     "category": "Humanities"
   }
-}
+}

@@ -1,3 +1,4 @@
+# Copyright 2025 the LlamaFactory team.
 # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -11,6 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import os

 import datasets
@@ -20,14 +22,15 @@ import pandas as pd
 _CITATION = """\
 @article{hendryckstest2021,
     title={Measuring Massive Multitask Language Understanding},
-    author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},
+    author={Dan Hendrycks and Collin Burns and others},
     journal={Proceedings of the International Conference on Learning Representations (ICLR)},
     year={2021}
 }
 """

 _DESCRIPTION = """\
-Measuring Massive Multitask Language Understanding by Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt (ICLR 2021).
+Measuring Massive Multitask Language Understanding by Dan Hendrycks, Collin Burns, Steven Basart,
+Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt (ICLR 2021).
 """

 _HOMEPAGE = "https://github.com/hendrycks/test"
@@ -154,8 +157,7 @@ class MMLU(datasets.GeneratorBasedBuilder):
         ]

     def _generate_examples(self, filepath):
-        df = pd.read_csv(filepath)
+        df = pd.read_csv(filepath, header=None)
         df.columns = ["question", "A", "B", "C", "D", "answer"]

-        for i, instance in enumerate(df.to_dict(orient="records")):
-            yield i, instance
+        yield from enumerate(df.to_dict(orient="records"))
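
As an editorial aside on the `header=None` fix above (not part of the diff): `pd.read_csv` otherwise promotes the first CSV row to column labels, so a headerless MMLU-style file would silently lose its first question. A minimal, self-contained illustration:

```python
import io

import pandas as pd

# One row in the headerless MMLU CSV layout: question, A, B, C, D, answer
csv_text = "What is the capital of France?,Paris,London,Rome,Berlin,A\n"

df_wrong = pd.read_csv(io.StringIO(csv_text))               # row consumed as the header
df_right = pd.read_csv(io.StringIO(csv_text), header=None)  # row kept as data

print(len(df_wrong), len(df_right))  # prints: 0 1
```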

@@ -4,59 +4,85 @@ Make sure to execute these commands in the `LLaMA-Factory` directory.

 ## Table of Contents

-- [LoRA Fine-Tuning on A Single GPU](#lora-fine-tuning-on-a-single-gpu)
-- [QLoRA Fine-Tuning on a Single GPU](#qlora-fine-tuning-on-a-single-gpu)
-- [LoRA Fine-Tuning on Multiple GPUs](#lora-fine-tuning-on-multiple-gpus)
-- [LoRA Fine-Tuning on Multiple NPUs](#lora-fine-tuning-on-multiple-npus)
-- [Full-Parameter Fine-Tuning on Multiple GPUs](#full-parameter-fine-tuning-on-multiple-gpus)
+- [LoRA Fine-Tuning](#lora-fine-tuning)
+- [QLoRA Fine-Tuning](#qlora-fine-tuning)
+- [Full-Parameter Fine-Tuning](#full-parameter-fine-tuning)
 - [Merging LoRA Adapters and Quantization](#merging-lora-adapters-and-quantization)
 - [Inferring LoRA Fine-Tuned Models](#inferring-lora-fine-tuned-models)
 - [Extras](#extras)

+Use `CUDA_VISIBLE_DEVICES` (GPU) or `ASCEND_RT_VISIBLE_DEVICES` (NPU) to choose computing devices.
+
+By default, LLaMA-Factory uses all visible computing devices.
+
+Basic usage:
+
+```bash
+llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
+```
+
+Advanced usage:
+
+```bash
+CUDA_VISIBLE_DEVICES=0,1 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml \
+    learning_rate=1e-5 \
+    logging_steps=1
+```
+
+```bash
+bash examples/train_lora/llama3_lora_sft.sh
+```

 ## Examples

-### LoRA Fine-Tuning on A Single GPU
+### LoRA Fine-Tuning

 #### (Continuous) Pre-Training

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_pretrain.yaml
+llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml
 ```

 #### Supervised Fine-Tuning

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_sft.yaml
+llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
 ```

 #### Multimodal Supervised Fine-Tuning

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llava1_5_lora_sft.yaml
+llamafactory-cli train examples/train_lora/qwen2_5vl_lora_sft.yaml
 ```

+#### DPO/ORPO/SimPO Training
+
+```bash
+llamafactory-cli train examples/train_lora/llama3_lora_dpo.yaml
+```
+
+#### Multimodal DPO/ORPO/SimPO Training
+
+```bash
+llamafactory-cli train examples/train_lora/qwen2_5vl_lora_dpo.yaml
+```

 #### Reward Modeling

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_reward.yaml
+llamafactory-cli train examples/train_lora/llama3_lora_reward.yaml
 ```

 #### PPO Training

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_ppo.yaml
+llamafactory-cli train examples/train_lora/llama3_lora_ppo.yaml
 ```

-#### DPO Training
+#### KTO Training

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_dpo.yaml
-```
-
-#### ORPO Training
-
-```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_orpo.yaml
+llamafactory-cli train examples/train_lora/llama3_lora_kto.yaml
 ```

 #### Preprocess Dataset
@@ -64,93 +90,93 @@ CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lo
 It is useful for large datasets; set `tokenized_path` in the config to load the preprocessed dataset.

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_preprocess.yaml
+llamafactory-cli train examples/train_lora/llama3_preprocess.yaml
 ```
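
For reference, a hedged sketch of how `tokenized_path` fits into such a config (the path and surrounding keys are illustrative, following the config style used elsewhere in this diff):

```yaml
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 2048
tokenized_path: saves/llama3-8b/dataset/sft  # written on the first run, loaded on later runs
```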

 #### Evaluating on MMLU/CMMLU/C-Eval Benchmarks

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli eval examples/lora_single_gpu/llama3_lora_eval.yaml
+llamafactory-cli eval examples/train_lora/llama3_lora_eval.yaml
 ```

-#### Batch Predicting and Computing BLEU and ROUGE Scores
+#### Supervised Fine-Tuning on Multiple Nodes

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_predict.yaml
-```
-
-### QLoRA Fine-Tuning on a Single GPU
-
-#### Supervised Fine-Tuning with 4/8-bit Bitsandbytes Quantization (Recommended)
-
-```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_bitsandbytes.yaml
-```
-
-#### Supervised Fine-Tuning with 4/8-bit GPTQ Quantization
-
-```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_gptq.yaml
-```
-
-#### Supervised Fine-Tuning with 4-bit AWQ Quantization
-
-```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_awq.yaml
-```
-
-#### Supervised Fine-Tuning with 2-bit AQLM Quantization
-
-```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_aqlm.yaml
-```
-
-### LoRA Fine-Tuning on Multiple GPUs
-
-#### Supervised Fine-Tuning with Accelerate on Single Node
-
-```bash
-bash examples/lora_multi_gpu/single_node.sh
-```
-
-#### Supervised Fine-Tuning with Accelerate on Multiple Nodes
-
-```bash
-bash examples/lora_multi_gpu/multi_node.sh
+FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
+FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
 ```

 #### Supervised Fine-Tuning with DeepSpeed ZeRO-3 (Weight Sharding)

 ```bash
-bash examples/lora_multi_gpu/ds_zero3.sh
+FORCE_TORCHRUN=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ds3.yaml
 ```

-### LoRA Fine-Tuning on Multiple NPUs
-
-#### Supervised Fine-Tuning with DeepSpeed ZeRO-0
+#### Supervised Fine-Tuning with Ray on 4 GPUs

 ```bash
-bash examples/lora_multi_npu/ds_zero0.sh
+USE_RAY=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ray.yaml
 ```

-### Full-Parameter Fine-Tuning on Multiple GPUs
+### QLoRA Fine-Tuning

-#### Supervised Fine-Tuning with Accelerate on Single Node
+#### Supervised Fine-Tuning with 4/8-bit Bitsandbytes/HQQ/EETQ Quantization (Recommended)

 ```bash
-bash examples/full_multi_gpu/single_node.sh
+llamafactory-cli train examples/train_qlora/llama3_lora_sft_otfq.yaml
 ```
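
A hedged sketch of the on-the-fly quantization keys such a config adds on top of a plain LoRA SFT recipe (key names follow LLaMA-Factory's argument style; the exact contents of `llama3_lora_sft_otfq.yaml` are an assumption):

```yaml
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
quantization_bit: 4                 # on-the-fly 4-bit quantization
quantization_method: bitsandbytes   # assumed choices: [bitsandbytes, hqq, eetq]
trust_remote_code: true
```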

-#### Supervised Fine-Tuning with Accelerate on Multiple Nodes
+#### Supervised Fine-Tuning with 4-bit Bitsandbytes Quantization on Ascend NPU

 ```bash
-bash examples/full_multi_gpu/multi_node.sh
+llamafactory-cli train examples/train_qlora/llama3_lora_sft_bnb_npu.yaml
 ```

-#### Batch Predicting and Computing BLEU and ROUGE Scores
+#### Supervised Fine-Tuning with 4/8-bit GPTQ Quantization

 ```bash
-bash examples/full_multi_gpu/predict.sh
+llamafactory-cli train examples/train_qlora/llama3_lora_sft_gptq.yaml
 ```

+#### Supervised Fine-Tuning with 4-bit AWQ Quantization
+
+```bash
+llamafactory-cli train examples/train_qlora/llama3_lora_sft_awq.yaml
+```
+
+#### Supervised Fine-Tuning with 2-bit AQLM Quantization
+
+```bash
+llamafactory-cli train examples/train_qlora/llama3_lora_sft_aqlm.yaml
+```
+
+### Full-Parameter Fine-Tuning
+
+#### Supervised Fine-Tuning on Single Node
+
+```bash
+FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
+```
+
+#### Supervised Fine-Tuning on Multiple Nodes
+
+```bash
+FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
+FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
+```
+
+### Elastic and Fault-Tolerant Supervised Fine-Tuning on Multiple Nodes
+
+To launch an elastic job that tolerates up to `MAX_RESTARTS` restarts on failure, run the following on at least `MIN_NNODES` and at most `MAX_NNODES` nodes. `RDZV_ID` should be set as a unique job id (shared by all nodes participating in the job). See also [torchrun](https://docs.pytorch.org/docs/stable/elastic/run.html).
+
+```bash
+FORCE_TORCHRUN=1 MIN_NNODES=1 MAX_NNODES=3 MAX_RESTARTS=3 RDZV_ID=llamafactory MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
+```
+
+#### Multimodal Supervised Fine-Tuning
+
+```bash
+FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/qwen2_5vl_full_sft.yaml
+```

 ### Merging LoRA Adapters and Quantization
@@ -160,33 +186,46 @@ bash examples/full_multi_gpu/predict.sh

 Note: DO NOT use quantized model or `quantization_bit` when merging LoRA adapters.

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
+llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
 ```
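
A hedged sketch of what a merge config like `llama3_lora_sft.yaml` typically contains (key names follow LLaMA-Factory's export arguments; the values shown are illustrative assumptions):

```yaml
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
adapter_name_or_path: saves/llama3-8b/lora/sft  # the LoRA adapter to merge
template: llama3
trust_remote_code: true

### export
export_dir: output/llama3_lora_sft
export_size: 5                # shard size in GB (assumed)
export_legacy_format: false
```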

 #### Quantizing Model using AutoGPTQ

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_gptq.yaml
+llamafactory-cli export examples/merge_lora/llama3_gptq.yaml
 ```

+### Save Ollama modelfile
+
+```bash
+llamafactory-cli export examples/merge_lora/llama3_full_sft.yaml
+```

 ### Inferring LoRA Fine-Tuned Models

-#### Use CLI
+#### Evaluation using vLLM's Multi-GPU Inference

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli chat examples/merge_lora/llama3_lora_sft.yaml
-```
+python scripts/vllm_infer.py --model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct --template llama3 --dataset alpaca_en_demo
+python scripts/eval_bleu_rouge.py generated_predictions.jsonl
+```

-#### Use Web UI
+#### Use CLI ChatBox

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli webchat examples/merge_lora/llama3_lora_sft.yaml
+llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
 ```

+#### Use Web UI ChatBox
+
+```bash
+llamafactory-cli webchat examples/inference/llama3_lora_sft.yaml
+```

 #### Launch OpenAI-style API

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli api examples/merge_lora/llama3_lora_sft.yaml
+llamafactory-cli api examples/inference/llama3_lora_sft.yaml
 ```

 ### Extras
@@ -194,36 +233,60 @@ CUDA_VISIBLE_DEVICES=0 llamafactory-cli api examples/merge_lora/llama3_lora_sft.

 #### Full-Parameter Fine-Tuning using GaLore

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml
+llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml
 ```

+#### Full-Parameter Fine-Tuning using APOLLO
+
+```bash
+llamafactory-cli train examples/extras/apollo/llama3_full_sft.yaml
+```

 #### Full-Parameter Fine-Tuning using BAdam

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
+llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
 ```

+#### Full-Parameter Fine-Tuning using Adam-mini
+
+```bash
+llamafactory-cli train examples/extras/adam_mini/qwen2_full_sft.yaml
+```
+
+#### Full-Parameter Fine-Tuning using Muon
+
+```bash
+llamafactory-cli train examples/extras/muon/qwen2_full_sft.yaml
+```

 #### LoRA+ Fine-Tuning

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/loraplus/llama3_lora_sft.yaml
+llamafactory-cli train examples/extras/loraplus/llama3_lora_sft.yaml
 ```

+#### PiSSA Fine-Tuning
+
+```bash
+llamafactory-cli train examples/extras/pissa/llama3_lora_sft.yaml
+```

 #### Mixture-of-Depths Fine-Tuning

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/mod/llama3_full_sft.yaml
+llamafactory-cli train examples/extras/mod/llama3_full_sft.yaml
 ```

 #### LLaMA-Pro Fine-Tuning

 ```bash
 bash examples/extras/llama_pro/expand.sh
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/llama_pro/llama3_freeze_sft.yaml
+llamafactory-cli train examples/extras/llama_pro/llama3_freeze_sft.yaml
 ```

 #### FSDP+QLoRA Fine-Tuning

 ```bash
-bash examples/extras/fsdp_qlora/single_node.sh
+bash examples/extras/fsdp_qlora/train.sh
 ```

@@ -4,59 +4,85 @@

 ## 目录

-- [单 GPU LoRA 微调](#单-gpu-lora-微调)
-- [单 GPU QLoRA 微调](#单-gpu-qlora-微调)
-- [多 GPU LoRA 微调](#多-gpu-lora-微调)
-- [多 NPU LoRA 微调](#多-npu-lora-微调)
-- [多 GPU 全参数微调](#多-gpu-全参数微调)
+- [LoRA 微调](#lora-微调)
+- [QLoRA 微调](#qlora-微调)
+- [全参数微调](#全参数微调)
 - [合并 LoRA 适配器与模型量化](#合并-lora-适配器与模型量化)
 - [推理 LoRA 模型](#推理-lora-模型)
 - [杂项](#杂项)

+使用 `CUDA_VISIBLE_DEVICES`(GPU)或 `ASCEND_RT_VISIBLE_DEVICES`(NPU)选择计算设备。
+
+LLaMA-Factory 默认使用所有可见的计算设备。
+
+基础用法:
+
+```bash
+llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
+```
+
+高级用法:
+
+```bash
+CUDA_VISIBLE_DEVICES=0,1 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml \
+    learning_rate=1e-5 \
+    logging_steps=1
+```
+
+```bash
+bash examples/train_lora/llama3_lora_sft.sh
+```

 ## 示例

-### 单 GPU LoRA 微调
+### LoRA 微调

 #### (增量)预训练

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_pretrain.yaml
+llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml
 ```

 #### 指令监督微调

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_sft.yaml
+llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
 ```

 #### 多模态指令监督微调

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llava1_5_lora_sft.yaml
+llamafactory-cli train examples/train_lora/qwen2_5vl_lora_sft.yaml
 ```

+#### DPO/ORPO/SimPO 训练
+
+```bash
+llamafactory-cli train examples/train_lora/llama3_lora_dpo.yaml
+```
+
+#### 多模态 DPO/ORPO/SimPO 训练
+
+```bash
+llamafactory-cli train examples/train_lora/qwen2_5vl_lora_dpo.yaml
+```

 #### 奖励模型训练

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_reward.yaml
+llamafactory-cli train examples/train_lora/llama3_lora_reward.yaml
 ```

 #### PPO 训练

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_ppo.yaml
+llamafactory-cli train examples/train_lora/llama3_lora_ppo.yaml
 ```

-#### DPO 训练
+#### KTO 训练

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_dpo.yaml
-```
-
-#### ORPO 训练
-
-```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_orpo.yaml
+llamafactory-cli train examples/train_lora/llama3_lora_kto.yaml
 ```

 #### 预处理数据集
@@ -64,93 +90,93 @@ CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lo
 对于大数据集有帮助,在配置中使用 `tokenized_path` 以加载预处理后的数据集。

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_preprocess.yaml
+llamafactory-cli train examples/train_lora/llama3_preprocess.yaml
 ```

 #### 在 MMLU/CMMLU/C-Eval 上评估

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli eval examples/lora_single_gpu/llama3_lora_eval.yaml
+llamafactory-cli eval examples/train_lora/llama3_lora_eval.yaml
 ```

-#### 批量预测并计算 BLEU 和 ROUGE 分数
+#### 多机指令监督微调

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_predict.yaml
+FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
+FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
 ```

-### 单 GPU QLoRA 微调
+### 支持弹性和容错的多机指令监督微调

-#### 基于 4/8 比特 Bitsandbytes 量化进行指令监督微调(推荐)
+要启动一个支持弹性节点和容错的多机指令微调,在每个节点上执行以下命令。弹性节点数量范围为 `MIN_NNODES:MAX_NNODES`,每个节点最多允许因为错误重启 `MAX_RESTARTS` 次。`RDZV_ID` 应设置为一个唯一的作业 ID(由参与该作业的所有节点共享)。更多信息可以参考官方文档 [torchrun](https://docs.pytorch.org/docs/stable/elastic/run.html)。

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_bitsandbytes.yaml
-```
-
-#### 基于 4/8 比特 GPTQ 量化进行指令监督微调
-
-```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_gptq.yaml
-```
-
-#### 基于 4 比特 AWQ 量化进行指令监督微调
-
-```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_awq.yaml
-```
-
-#### 基于 2 比特 AQLM 量化进行指令监督微调
-
-```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/qlora_single_gpu/llama3_lora_sft_aqlm.yaml
-```
-
-### 多 GPU LoRA 微调
-
-#### 使用 Accelerate 进行单节点训练
-
-```bash
-bash examples/lora_multi_gpu/single_node.sh
-```
-
-#### 使用 Accelerate 进行多节点训练
-
-```bash
-bash examples/lora_multi_gpu/multi_node.sh
+FORCE_TORCHRUN=1 MIN_NNODES=1 MAX_NNODES=3 MAX_RESTARTS=3 RDZV_ID=llamafactory MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
 ```

 #### 使用 DeepSpeed ZeRO-3 平均分配显存

 ```bash
-bash examples/lora_multi_gpu/ds_zero3.sh
+FORCE_TORCHRUN=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ds3.yaml
 ```

-### 多 NPU LoRA 微调
-
-#### 使用 DeepSpeed ZeRO-0 训练
+#### 使用 Ray 在 4 张 GPU 上微调

 ```bash
-bash examples/lora_multi_npu/ds_zero0.sh
+USE_RAY=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ray.yaml
 ```

-### 多 GPU 全参数微调
+### QLoRA 微调

-#### 使用 DeepSpeed 进行单节点训练
+#### 基于 4/8 比特 Bitsandbytes/HQQ/EETQ 量化进行指令监督微调(推荐)

 ```bash
-bash examples/full_multi_gpu/single_node.sh
+llamafactory-cli train examples/train_qlora/llama3_lora_sft_otfq.yaml
 ```

-#### 使用 DeepSpeed 进行多节点训练
+#### 在 NPU 上基于 4 比特 Bitsandbytes 量化进行指令监督微调

 ```bash
-bash examples/full_multi_gpu/multi_node.sh
+llamafactory-cli train examples/train_qlora/llama3_lora_sft_bnb_npu.yaml
 ```

-#### 批量预测并计算 BLEU 和 ROUGE 分数
+#### 基于 4/8 比特 GPTQ 量化进行指令监督微调

 ```bash
-bash examples/full_multi_gpu/predict.sh
+llamafactory-cli train examples/train_qlora/llama3_lora_sft_gptq.yaml
 ```

+#### 基于 4 比特 AWQ 量化进行指令监督微调
+
+```bash
+llamafactory-cli train examples/train_qlora/llama3_lora_sft_awq.yaml
+```
+
+#### 基于 2 比特 AQLM 量化进行指令监督微调
+
+```bash
+llamafactory-cli train examples/train_qlora/llama3_lora_sft_aqlm.yaml
+```
+
+### 全参数微调
+
+#### 在单机上进行指令监督微调
+
+```bash
+FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
+```
+
+#### 在多机上进行指令监督微调
+
+```bash
+FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
+FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
+```
+
+#### 多模态指令监督微调
+
+```bash
+FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/qwen2_5vl_full_sft.yaml
+```

 ### 合并 LoRA 适配器与模型量化
@@ -160,33 +186,46 @@ bash examples/full_multi_gpu/predict.sh

 注:请勿使用量化后的模型或 `quantization_bit` 参数来合并 LoRA 适配器。

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
+llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
 ```

 #### 使用 AutoGPTQ 量化模型

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_gptq.yaml
+llamafactory-cli export examples/merge_lora/llama3_gptq.yaml
 ```

+### 保存 Ollama 配置文件
+
+```bash
+llamafactory-cli export examples/merge_lora/llama3_full_sft.yaml
+```

 ### 推理 LoRA 模型

-#### 使用命令行接口
+#### 使用 vLLM 多卡推理评估

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli chat examples/merge_lora/llama3_lora_sft.yaml
-```
+python scripts/vllm_infer.py --model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct --template llama3 --dataset alpaca_en_demo
+python scripts/eval_bleu_rouge.py generated_predictions.jsonl
+```

-#### 使用浏览器界面
+#### 使用命令行对话框

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli webchat examples/merge_lora/llama3_lora_sft.yaml
+llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
 ```

+#### 使用浏览器对话框
+
+```bash
+llamafactory-cli webchat examples/inference/llama3_lora_sft.yaml
+```

 #### 启动 OpenAI 风格 API

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli api examples/merge_lora/llama3_lora_sft.yaml
+llamafactory-cli api examples/inference/llama3_lora_sft.yaml
 ```

 ### 杂项
@@ -194,36 +233,60 @@ CUDA_VISIBLE_DEVICES=0 llamafactory-cli api examples/merge_lora/llama3_lora_sft.

 #### 使用 GaLore 进行全参数训练

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml
+llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml
 ```

+#### 使用 APOLLO 进行全参数训练
+
+```bash
+llamafactory-cli train examples/extras/apollo/llama3_full_sft.yaml
+```

 #### 使用 BAdam 进行全参数训练

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
+llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
 ```

+#### 使用 Adam-mini 进行全参数训练
+
+```bash
+llamafactory-cli train examples/extras/adam_mini/qwen2_full_sft.yaml
+```
+
+#### 使用 Muon 进行全参数训练
+
+```bash
+llamafactory-cli train examples/extras/muon/qwen2_full_sft.yaml
+```

 #### LoRA+ 微调

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/loraplus/llama3_lora_sft.yaml
+llamafactory-cli train examples/extras/loraplus/llama3_lora_sft.yaml
 ```

+#### PiSSA 微调
+
+```bash
+llamafactory-cli train examples/extras/pissa/llama3_lora_sft.yaml
+```

 #### 深度混合微调

 ```bash
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/mod/llama3_full_sft.yaml
+llamafactory-cli train examples/extras/mod/llama3_full_sft.yaml
 ```

 #### LLaMA-Pro 微调

 ```bash
 bash examples/extras/llama_pro/expand.sh
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/extras/llama_pro/llama3_freeze_sft.yaml
+llamafactory-cli train examples/extras/llama_pro/llama3_freeze_sft.yaml
 ```

 #### FSDP+QLoRA 微调

 ```bash
-bash examples/extras/fsdp_qlora/single_node.sh
+bash examples/extras/fsdp_qlora/train.sh
 ```

@@ -5,18 +5,18 @@ downcast_bf16: 'no'
 fsdp_config:
   fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
   fsdp_backward_prefetch: BACKWARD_PRE
-  fsdp_cpu_ram_efficient_loading: true
   fsdp_forward_prefetch: false
-  fsdp_offload_params: true
+  fsdp_cpu_ram_efficient_loading: true
+  fsdp_offload_params: false
   fsdp_sharding_strategy: FULL_SHARD
   fsdp_state_dict_type: FULL_STATE_DICT
   fsdp_sync_module_states: true
-  fsdp_use_orig_params: false
+  fsdp_use_orig_params: true
 machine_rank: 0
 main_training_function: main
-mixed_precision: fp16
-num_machines: 1 # the number of nodes
-num_processes: 2 # the number of GPUs in all nodes
+mixed_precision: bf16 # or fp16
+num_machines: 1 # the number of nodes
+num_processes: 2 # the number of GPUs in all nodes
 rdzv_backend: static
 same_network: true
 tpu_env: []

examples/accelerate/fsdp_config_offload.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: FSDP
downcast_bf16: 'no'
fsdp_config:
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_backward_prefetch: BACKWARD_PRE
  fsdp_forward_prefetch: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_offload_params: true # offload may affect training speed
  fsdp_sharding_strategy: FULL_SHARD
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sync_module_states: true
  fsdp_use_orig_params: true
machine_rank: 0
main_training_function: main
mixed_precision: bf16 # or fp16
num_machines: 1 # the number of nodes
num_processes: 2 # the number of GPUs in all nodes
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false

@@ -1,18 +0,0 @@
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: MULTI_GPU
downcast_bf16: 'no'
gpu_ids: all
machine_rank: 0
main_process_ip: 192.168.0.1
main_process_port: 29555
main_training_function: main
mixed_precision: fp16
num_machines: 2 # the number of nodes
num_processes: 8 # the number of GPUs in all nodes
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false

@@ -1,16 +0,0 @@
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: MULTI_GPU
downcast_bf16: 'no'
gpu_ids: all
machine_rank: 0
main_training_function: main
mixed_precision: fp16
num_machines: 1 # the number of nodes
num_processes: 4 # the number of GPUs in all nodes
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false

@@ -1,18 +0,0 @@
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: MULTI_GPU
downcast_bf16: 'no'
gpu_ids: all
machine_rank: 1
main_process_ip: 192.168.0.1
main_process_port: 29555
main_training_function: main
mixed_precision: fp16
num_machines: 2 # the number of nodes
num_processes: 8 # the number of GPUs in all nodes
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false

@@ -19,10 +19,10 @@
     "stage": 0,
     "allgather_partitions": true,
     "allgather_bucket_size": 5e8,
-    "overlap_comm": true,
+    "overlap_comm": false,
     "reduce_scatter": true,
     "reduce_bucket_size": 5e8,
     "contiguous_gradients": true,
     "round_robin_gradients": true
   }
-}
+}

@@ -19,10 +19,10 @@
     "stage": 2,
     "allgather_partitions": true,
     "allgather_bucket_size": 5e8,
-    "overlap_comm": true,
+    "overlap_comm": false,
     "reduce_scatter": true,
     "reduce_bucket_size": 5e8,
     "contiguous_gradients": true,
     "round_robin_gradients": true
   }
-}
+}

@@ -23,10 +23,10 @@
     },
     "allgather_partitions": true,
     "allgather_bucket_size": 5e8,
-    "overlap_comm": true,
+    "overlap_comm": false,
     "reduce_scatter": true,
     "reduce_bucket_size": 5e8,
     "contiguous_gradients": true,
     "round_robin_gradients": true
   }
-}
+}

@@ -17,7 +17,7 @@
   },
   "zero_optimization": {
     "stage": 3,
-    "overlap_comm": true,
+    "overlap_comm": false,
     "contiguous_gradients": true,
     "sub_group_size": 1e9,
     "reduce_bucket_size": "auto",
@@ -27,4 +27,4 @@
     "stage3_max_reuse_distance": 1e9,
     "stage3_gather_16bit_weights_on_model_save": true
   }
-}
+}

@@ -25,7 +25,7 @@
       "device": "cpu",
       "pin_memory": true
     },
-    "overlap_comm": true,
+    "overlap_comm": false,
     "contiguous_gradients": true,
     "sub_group_size": 1e9,
     "reduce_bucket_size": "auto",
@@ -35,4 +35,4 @@
     "stage3_max_reuse_distance": 1e9,
     "stage3_gather_16bit_weights_on_model_save": true
   }
-}
+}

examples/extras/adam_mini/qwen2_full_sft.yaml (new file, 43 lines)
@@ -0,0 +1,43 @@
### model
model_name_or_path: Qwen/Qwen2-1.5B-Instruct
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: full
use_adam_mini: true

### dataset
dataset: identity,alpaca_en_demo
template: qwen
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
dataloader_num_workers: 4

### output
output_dir: saves/qwen2-1_5b/full/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
save_only_model: false
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-5
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000

### eval
# val_size: 0.1
# per_device_eval_batch_size: 1
# eval_strategy: steps
# eval_steps: 500

examples/extras/apollo/llama3_full_sft.yaml (new file, 48 lines)
@@ -0,0 +1,48 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: full
use_apollo: true
apollo_layerwise: true # choices: [true, false], use false for DDP training
apollo_target: all
apollo_rank: 128
apollo_scale: 32.0
apollo_scale_type: channel

### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
dataloader_num_workers: 4

### output
output_dir: saves/llama3-8b/full/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
save_only_model: false
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 1 # use 1 for layerwise apollo
learning_rate: 1.0e-5
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
pure_bf16: true
ddp_timeout: 180000000

### eval
# val_size: 0.1
# per_device_eval_batch_size: 1
# eval_strategy: steps
# eval_steps: 500

examples/extras/badam/llama3_full_sft.yaml (new file, 46 lines)
@@ -0,0 +1,46 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: full
use_badam: true
badam_mode: layer
badam_switch_mode: ascending
badam_switch_interval: 50
badam_verbose: 2
# deepspeed: examples/deepspeed/ds_z3_config.json

### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
dataloader_num_workers: 4

### output
output_dir: saves/llama3-8b/full/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
save_only_model: false
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-5
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1

### eval
# val_size: 0.1
# per_device_eval_batch_size: 1
# eval_strategy: steps
# eval_steps: 500

@@ -1,41 +0,0 @@
# model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct

# method
stage: sft
do_train: true
finetuning_type: full
use_badam: true
badam_switch_mode: descending
badam_switch_interval: 50
badam_verbose: 2

# dataset
dataset: identity,alpaca_gpt4_en
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

# output
output_dir: saves/llama3-8b/full/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

# train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 0.0001
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_steps: 0.1
pure_bf16: true

# eval
val_size: 0.1
per_device_eval_batch_size: 1
evaluation_strategy: steps
eval_steps: 500

@@ -1,42 +1,45 @@
-# model
+### model
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 quantization_bit: 4
+trust_remote_code: true

-# method
+### method
 stage: sft
 do_train: true
 finetuning_type: lora
-lora_target: q_proj,v_proj
+lora_rank: 8
+lora_target: all

-# ddp
-ddp_timeout: 180000000
-
-# dataset
-dataset: identity,alpaca_gpt4_en
+### dataset
+dataset: identity,alpaca_en_demo
 template: llama3
-cutoff_len: 1024
+cutoff_len: 2048
 max_samples: 1000
 overwrite_cache: true
 preprocessing_num_workers: 16
+dataloader_num_workers: 4

-# output
+### output
 output_dir: saves/llama3-8b/lora/sft
 logging_steps: 10
 save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
+save_only_model: false
+report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]

-# train
+### train
 per_device_train_batch_size: 1
 gradient_accumulation_steps: 8
-learning_rate: 0.0001
+learning_rate: 1.0e-4
 num_train_epochs: 3.0
 lr_scheduler_type: cosine
-warmup_steps: 0.1
-fp16: true
+warmup_ratio: 0.1
+bf16: true
+ddp_timeout: 180000000

-# eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-evaluation_strategy: steps
-eval_steps: 500
+### eval
+# val_size: 0.1
+# per_device_eval_batch_size: 1
+# eval_strategy: steps
+# eval_steps: 500

Some files were not shown because too many files have changed in this diff