Compare commits
816 Commits
.dockerignore

````diff
@@ -3,10 +3,12 @@
.github
.venv
cache
data
docker
saves
hf_cache
ms_cache
om_cache
shared_data
output
.dockerignore
.gitattributes
````
.env.local (25 changed lines)

````diff
@@ -1,35 +1,42 @@
 # Note: actually we do not support .env, just for reference
 # api
-API_HOST=0.0.0.0
-API_PORT=8000
+API_HOST=
+API_PORT=
 API_KEY=
-API_MODEL_NAME=gpt-3.5-turbo
+API_MODEL_NAME=
 API_VERBOSE=
 FASTAPI_ROOT_PATH=
 MAX_CONCURRENT=
 # general
 DISABLE_VERSION_CHECK=
 FORCE_CHECK_IMPORTS=
 FORCE_TORCHRUN=
 ALLOW_EXTRA_ARGS=
 LLAMAFACTORY_VERBOSITY=
 USE_MODELSCOPE_HUB=
 USE_OPENMIND_HUB=
 USE_RAY=
 RECORD_VRAM=
 OPTIM_TORCH=
 NPU_JIT_COMPILE=
 # torchrun
 FORCE_TORCHRUN=
 MASTER_ADDR=
 MASTER_PORT=
 NNODES=
 RANK=
 NODE_RANK=
 NPROC_PER_NODE=
 # wandb
 WANDB_DISABLED=
-WANDB_PROJECT=huggingface
+WANDB_PROJECT=
 WANDB_API_KEY=
 # gradio ui
-GRADIO_SHARE=False
-GRADIO_SERVER_NAME=0.0.0.0
+GRADIO_SHARE=
+GRADIO_SERVER_NAME=
 GRADIO_SERVER_PORT=
 GRADIO_ROOT_PATH=
 GRADIO_IPV6=
 # setup
-ENABLE_SHORT_CONSOLE=1
+ENABLE_SHORT_CONSOLE=
 # reserved (do not use)
 LLAMABOARD_ENABLED=
 LLAMABOARD_WORKDIR=
````
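The top comment notes that `.env` loading is not supported: the file is a reference list, and the variables take effect only when exported in the shell. A minimal sketch of setting a few of them before launching the web UI (the values below are illustrative, not defaults):

```bash
# Illustrative values only; any variable left empty in .env.local falls back
# to the framework's built-in defaults.
export GRADIO_SERVER_NAME=0.0.0.0   # bind address for the Gradio web UI
export GRADIO_SERVER_PORT=7860      # port for the Gradio web UI
export USE_MODELSCOPE_HUB=1         # fetch models/datasets from ModelScope

llamafactory-cli webui              # picks the settings up from the environment
```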
.github/CONTRIBUTING.md (46 changed lines)

````diff
@@ -19,3 +19,49 @@ There are several ways you can contribute to LLaMA Factory:
 ### Style guide

 LLaMA Factory follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html), check it for details.
+
+### Create a Pull Request
+
+1. Fork the [repository](https://github.com/hiyouga/LLaMA-Factory) by clicking on the [Fork](https://github.com/hiyouga/LLaMA-Factory/fork) button on the repository's page. This creates a copy of the code under your GitHub user account.
+
+2. Clone your fork to your local disk, and add the base repository as a remote:
+
+```bash
+git clone git@github.com:[username]/LLaMA-Factory.git
+cd LLaMA-Factory
+git remote add upstream https://github.com/hiyouga/LLaMA-Factory.git
+```
+
+3. Create a new branch to hold your development changes:
+
+```bash
+git checkout -b dev_your_branch
+```
+
+4. Set up a development environment by running the following command in a virtual environment:
+
+```bash
+pip install -e ".[dev]"
+```
+
+If LLaMA Factory was already installed in the virtual environment, remove it with `pip uninstall llamafactory` before reinstalling it in editable mode with the -e flag.
+
+5. Check code before commit:
+
+```bash
+make commit
+make style && make quality
+make test
+```
+
+6. Submit changes:
+
+```bash
+git add .
+git commit -m "commit message"
+git fetch upstream
+git rebase upstream/main
+git push -u origin dev_your_branch
+```
+
+7. Create a merge request from your branch `dev_your_branch` at [origin repo](https://github.com/hiyouga/LLaMA-Factory).
````
.github/ISSUE_TEMPLATE/1-bug-report.yml (new file, 61 lines; truncated in this view)

````diff
@@ -0,0 +1,61 @@
+name: "\U0001F41B Bug / help"
+description: Create a report to help us improve the LLaMA Factory
+labels: ["bug", "pending"]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Issues included in **[FAQs](https://github.com/hiyouga/LLaMA-Factory/issues/4614)** or those with **insufficient** information may be closed without a response.
+        已经包含在 **[常见问题](https://github.com/hiyouga/LLaMA-Factory/issues/4614)** 内或提供信息**不完整**的 issues 可能不会被回复。
+
+  - type: markdown
+    attributes:
+      value: |
+        Please do not create issues that are not related to framework bugs under this category, use **[Discussions](https://github.com/hiyouga/LLaMA-Factory/discussions/categories/q-a)** instead.
+        请勿在此分类下创建和框架 bug 无关的 issues,训练问题求助请使用 **[讨论区](https://github.com/hiyouga/LLaMA-Factory/discussions/categories/q-a)**。
+
+  - type: checkboxes
+    id: reminder
+    attributes:
+      label: Reminder
+      description: |
+        Please ensure you have read the above rules carefully and searched the existing issues (including FAQs).
+        请确保您已经认真阅读了上述规则并且搜索过现有的 issues(包括常见问题)。
+
+      options:
+        - label: I have read the above rules and searched the existing issues.
+          required: true
+
+  - type: textarea
+    id: system-info
+    validations:
+      required: true
+    attributes:
+      label: System Info
+      description: |
+        Please share your system info with us. You can run the command **llamafactory-cli env** and copy-paste its output below.
+        请提供您的系统信息。您可以在命令行运行 **llamafactory-cli env** 并将其输出复制到该文本框中。
+
+      placeholder: llamafactory version, platform, python version, ...
+
+  - type: textarea
+    id: reproduction
+    validations:
+      required: true
+    attributes:
+      label: Reproduction
+      description: |
+        Please provide entry arguments, error messages and stack traces that reproduces the problem.
+        请提供入口参数,错误日志以及异常堆栈以便于我们复现问题。
+
+      value: |
+        ```text
+        Put your message here.
+        ```
+
+  - type: textarea
+    id: others
+    validations:
+      required: false
+    attributes:
+      label: Others
````
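Both the new and the old form ask reporters to paste the output of `llamafactory-cli env`; gathering it is a single command:

```bash
llamafactory-cli env   # prints llamafactory version, platform, python version, ...
```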
.github/ISSUE_TEMPLATE/2-feature-request.yml (new file, 41 lines)

````diff
@@ -0,0 +1,41 @@
+name: "\U0001F680 Feature request"
+description: Submit a request for a new feature
+labels: ["enhancement", "pending"]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Please do not create issues that are not related to new features under this category.
+        请勿在此分类下创建和新特性无关的 issues。
+
+  - type: checkboxes
+    id: reminder
+    attributes:
+      label: Reminder
+      description: |
+        Please ensure you have read the above rules carefully and searched the existing issues.
+        请确保您已经认真阅读了上述规则并且搜索过现有的 issues。
+
+      options:
+        - label: I have read the above rules and searched the existing issues.
+          required: true
+
+  - type: textarea
+    id: description
+    validations:
+      required: true
+    attributes:
+      label: Description
+      description: |
+        A clear and concise description of the feature proposal.
+        请详细描述您希望加入的新功能特性。
+
+  - type: textarea
+    id: contribution
+    validations:
+      required: false
+    attributes:
+      label: Pull Request
+      description: |
+        Have you already created the relevant PR and submitted the code?
+        您是否已经创建了相关 PR 并提交了代码?
````
.github/ISSUE_TEMPLATE/bug-report.yml (deleted, 66 lines; truncated in this view)

````diff
@@ -1,66 +0,0 @@
-name: "\U0001F41B Bug / Help"
-description: Create a report to help us improve the LLaMA Factory
-body:
-  - type: markdown
-    attributes:
-      value: |
-        Issues included in **FAQs** or those with **insufficient** information may be closed without a response.
-        包含在**常见问题**内或提供信息**不完整**的 issues 可能不会被回复。
-
-  - type: checkboxes
-    id: reminder
-    attributes:
-      label: Reminder
-      description: |
-        Please ensure you have read the README carefully and searched the existing issues (including FAQs).
-        请确保您已经认真阅读了 README 并且搜索过现有的 issues(包括常见问题)。
-
-      options:
-        - label: I have read the README and searched the existing issues.
-          required: true
-
-  - type: textarea
-    id: system-info
-    validations:
-      required: true
-    attributes:
-      label: System Info
-      description: |
-        Please share your system info with us. You can run the command **llamafactory-cli env** and copy-paste its output below.
-        请提供您的系统信息。您可以在命令行运行 **llamafactory-cli env** 并将其输出复制到该文本框中。
-
-      placeholder: llamafactory version, platform, python version, ...
-
-  - type: textarea
-    id: reproduction
-    validations:
-      required: true
-    attributes:
-      label: Reproduction
-      description: |
-        Please provide code snippets, error messages and stack traces that reproduces the problem.
-        请提供运行参数,错误信息以及异常堆栈以便于我们复现该问题。
-        Remember to use Markdown tags to correctly format your code.
-        请合理使用 Markdown 标签来格式化您的文本。
-
-      placeholder: |
-        ```bash
-        llamafactory-cli train ...
-        ```
-
-  - type: textarea
-    id: expected-behavior
-    validations:
-      required: false
-    attributes:
-      label: Expected behavior
-      description: |
-        Please provide a clear and concise description of what you would expect to happen.
-        请提供您原本的目的,即这段代码的期望行为。
-
-  - type: textarea
-    id: others
-    validations:
-      required: false
-    attributes:
-      label: Others
````
.github/ISSUE_TEMPLATE/config.yml (new file, 8 lines)

````diff
@@ -0,0 +1,8 @@
+blank_issues_enabled: false
+contact_links:
+  - name: 📚 FAQs | 常见问题
+    url: https://github.com/hiyouga/LLaMA-Factory/issues/4614
+    about: Reading in advance is recommended | 建议提前阅读
+  - name: Discussions | 讨论区
+    url: https://github.com/hiyouga/LLaMA-Factory/discussions
+    about: Please ask fine-tuning questions here | 请在这里讨论训练问题
````
.github/workflows/docker.yml (new file, 108 lines)

````diff
@@ -0,0 +1,108 @@
+name: docker
+
+on:
+  workflow_dispatch:
+  push:
+    branches:
+      - "main"
+    paths:
+      - "**/*.py"
+      - "requirements.txt"
+      - "docker/**"
+      - ".github/workflows/*.yml"
+  pull_request:
+    branches:
+      - "main"
+    paths:
+      - "**/*.py"
+      - "requirements.txt"
+      - "docker/**"
+      - ".github/workflows/*.yml"
+
+jobs:
+  build:
+    strategy:
+      fail-fast: false
+      matrix:
+        device:
+          - "cuda"
+          - "npu"
+
+    runs-on: ubuntu-latest
+
+    concurrency:
+      group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.device }}
+      cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+    environment:
+      name: docker
+      url: https://hub.docker.com/r/hiyouga/llamafactory
+
+    steps:
+      - name: Free up disk space
+        uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1
+        with:
+          tool-cache: true
+          docker-images: false
+
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.9"
+
+      - name: Get llamafactory version
+        id: version
+        run: |
+          echo "tag=$(python setup.py --version | sed 's/\.dev0//')" >> "$GITHUB_OUTPUT"
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Login to Docker Hub
+        if: ${{ github.event_name != 'pull_request' }}
+        uses: docker/login-action@v3
+        with:
+          username: ${{ vars.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Login to Quay
+        if: ${{ github.event_name != 'pull_request' && matrix.device == 'npu' }}
+        uses: docker/login-action@v3
+        with:
+          registry: quay.io
+          username: ${{ vars.QUAY_ASCEND_USERNAME }}
+          password: ${{ secrets.QUAY_ASCEND_TOKEN }}
+
+      - name: Build and push Docker image (CUDA)
+        if: ${{ matrix.device == 'cuda' }}
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          file: ./docker/docker-cuda/Dockerfile
+          build-args: |
+            EXTRAS=metrics,deepspeed,liger-kernel
+          push: ${{ github.event_name != 'pull_request' }}
+          tags: |
+            docker.io/hiyouga/llamafactory:latest
+            docker.io/hiyouga/llamafactory:${{ steps.version.outputs.tag }}
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+
+      - name: Build and push Docker image (NPU)
+        if: ${{ matrix.device == 'npu' }}
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          platforms: linux/amd64,linux/arm64
+          file: ./docker/docker-npu/Dockerfile
+          push: ${{ github.event_name != 'pull_request' }}
+          tags: |
+            docker.io/hiyouga/llamafactory:latest-npu-a2
+            docker.io/hiyouga/llamafactory:${{ steps.version.outputs.tag }}-npu-a2
+            quay.io/ascend/llamafactory:latest-npu-a2
+            quay.io/ascend/llamafactory:${{ steps.version.outputs.tag }}-npu-a2
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
````
.github/workflows/label_issue.yml
vendored
@ -18,13 +18,15 @@ jobs:
|
||||
ISSUE_URL: ${{ github.event.issue.html_url }}
|
||||
ISSUE_TITLE: ${{ github.event.issue.title }}
|
||||
run: |
|
||||
LABEL=pending
|
||||
NPU_KEYWORDS=(npu huawei ascend 华为 昇腾)
|
||||
LABEL=""
|
||||
NPU_KEYWORDS=(npu huawei ascend 华为 昇腾 910)
|
||||
ISSUE_TITLE_LOWER=$(echo $ISSUE_TITLE | tr '[:upper:]' '[:lower:]')
|
||||
for KEYWORD in ${NPU_KEYWORDS[@]}; do
|
||||
if [[ $ISSUE_TITLE_LOWER == *$KEYWORD* ]] && [[ $ISSUE_TITLE_LOWER != *input* ]]; then
|
||||
LABEL=pending,npu
|
||||
LABEL="npu"
|
||||
break
|
||||
fi
|
||||
done
|
||||
gh issue edit $ISSUE_URL --add-label $LABEL
|
||||
if [ -n "$LABEL" ]; then
|
||||
gh issue edit $ISSUE_URL --add-label $LABEL
|
||||
fi
|
||||
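The matching logic can be replayed locally to check how a given title would be classified; a sketch with an illustrative issue title:

```bash
# Same matching logic as the workflow's run block, with a made-up title.
ISSUE_TITLE="Ascend 910B OOM during SFT"
LABEL=""
NPU_KEYWORDS=(npu huawei ascend 华为 昇腾 910)
ISSUE_TITLE_LOWER=$(echo $ISSUE_TITLE | tr '[:upper:]' '[:lower:]')
for KEYWORD in ${NPU_KEYWORDS[@]}; do
  # "input" is excluded because the substring "npu" occurs inside it,
  # which would otherwise mislabel titles about input handling.
  if [[ $ISSUE_TITLE_LOWER == *$KEYWORD* ]] && [[ $ISSUE_TITLE_LOWER != *input* ]]; then
    LABEL="npu"
    break
  fi
done
echo "label: ${LABEL:-none}"   # -> label: npu
```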
|
.github/workflows/publish.yml (14 changed lines)

````diff
@@ -1,6 +1,7 @@
 name: publish

 on:
+  workflow_dispatch:
   release:
     types:
       - published
@@ -25,16 +26,11 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v5
         with:
-          python-version: "3.8"
-
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          python -m pip install build
+          python-version: "3.9"

       - name: Build package
         run: |
-          python -m build
+          make build

       - name: Publish package
         uses: pypa/gh-action-pypi-publish@release/v1
````
.github/workflows/tests.yml (51 changed lines)

````diff
@@ -1,18 +1,19 @@
 name: tests

 on:
+  workflow_dispatch:
   push:
     branches:
       - "main"
     paths:
-      - "**.py"
+      - "**/*.py"
       - "requirements.txt"
       - ".github/workflows/*.yml"
   pull_request:
     branches:
       - "main"
     paths:
-      - "**.py"
+      - "**/*.py"
       - "requirements.txt"
      - ".github/workflows/*.yml"
@@ -21,20 +22,30 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version:
-          - "3.8"
+        python:
+          - "3.9"
+          - "3.10"
+          - "3.11"
+          - "3.12"
         os:
           - "ubuntu-latest"
           - "windows-latest"
           - "macos-13"
+        transformers:
+          - null
+        include: # test backward compatibility
+          - python: "3.9"
+            os: "ubuntu-latest"
+            transformers: "4.49.0"
+          - python: "3.9"
+            os: "ubuntu-latest"
+            transformers: "4.51.0"

     runs-on: ${{ matrix.os }}

     environment:
       name: tests
+    concurrency:
+      group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.os }}-${{ matrix.python }}-${{ matrix.transformers }}
+      cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

     env:
       HF_TOKEN: ${{ secrets.HF_TOKEN }}
@@ -47,20 +58,42 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v5
         with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ matrix.python }}
           cache: "pip"
-          cache-dependency-path: "setup.py"
+          cache-dependency-path: "**/requirements*.txt"

       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          python -m pip install git+https://github.com/huggingface/transformers.git
           python -m pip install ".[torch,dev]"

+      - name: Install transformers
+        if: ${{ matrix.transformers }}
+        run: |
+          python -m pip install "transformers==${{ matrix.transformers }}"
+
+      - name: Cache files
+        id: hf-hub-cache
+        uses: actions/cache@v4
+        with:
+          path: ${{ runner.temp }}/huggingface
+          key: huggingface-${{ matrix.os }}-${{ matrix.python }}-${{ matrix.transformers }}-${{ hashFiles('tests/version.txt') }}
+
       - name: Check quality
         run: |
           make style && make quality

+      - name: Check license
+        run: |
+          make license
+
+      - name: Check build
+        run: |
+          make build
+
       - name: Test with pytest
         run: |
           make test
+        env:
+          HF_HOME: ${{ runner.temp }}/huggingface
+          HF_HUB_OFFLINE: "${{ steps.hf-hub-cache.outputs.cache-hit == 'true' && '1' || '0' }}"
````
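The CI steps map directly onto Makefile targets, so the same gate can be reproduced locally before pushing (a sketch; assumes the same `[torch,dev]` extras the workflow installs):

```bash
python -m pip install ".[torch,dev]"   # same install step as CI
make style && make quality             # ruff formatting and lint checks
make license                           # license-header check
make build                             # sdist/wheel build check
make test                              # pytest suite (CPU-only, W&B disabled)
```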
.gitignore (12 changed lines)

````diff
@@ -159,11 +159,21 @@ cython_debug/
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 .idea/

+# vscode
+.vscode/
+
+# uv
+uv.lock
+
 # custom .gitignore
-ms_cache/
 hf_cache/
+ms_cache/
+om_cache/
 cache/
+config/
 saves/
 output/
 wandb/
+swanlog/
 generated_predictions.jsonl
 predictions_score.json
````
.pre-commit-config.yaml (new file, 28 lines)

````diff
@@ -0,0 +1,28 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v5.0.0
+    hooks:
+      - id: check-ast
+      - id: check-added-large-files
+        args: ['--maxkb=25000']
+      - id: check-merge-conflict
+      - id: check-yaml
+      - id: debug-statements
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+        args: [--markdown-linebreak-ext=md]
+      - id: no-commit-to-branch
+        args: ['--branch', 'main']
+
+  - repo: https://github.com/asottile/pyupgrade
+    rev: v3.17.0
+    hooks:
+      - id: pyupgrade
+        args: [--py38-plus]
+
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.6.9
+    hooks:
+      - id: ruff
+        args: [--fix]
+      - id: ruff-format
````
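With this config committed, the hooks run on every `git commit`; they can also be installed and exercised manually, which is exactly what the new `make commit` target below wraps:

```bash
pip install pre-commit      # assumes pre-commit is not already installed
pre-commit install          # register the hooks with git
pre-commit run --all-files  # run every hook against the whole tree once
```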
Makefile (14 changed lines)

````diff
@@ -1,7 +1,17 @@
-.PHONY: quality style test
+.PHONY: build commit license quality style test

 check_dirs := scripts src tests setup.py

+build:
+	pip3 install build && python3 -m build
+
+commit:
+	pre-commit install
+	pre-commit run --all-files
+
+license:
+	python3 tests/check_license.py $(check_dirs)
+
 quality:
 	ruff check $(check_dirs)
 	ruff format --check $(check_dirs)
@@ -11,4 +21,4 @@ style:
 	ruff format $(check_dirs)

 test:
-	CUDA_VISIBLE_DEVICES= pytest tests/
+	CUDA_VISIBLE_DEVICES= WANDB_DISABLED=true pytest -vv tests/
````
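In the updated `test` target, the empty `CUDA_VISIBLE_DEVICES=` hides every GPU from PyTorch and `WANDB_DISABLED=true` suppresses Weights & Biases runs, so the suite behaves the same on CI runners and GPU workstations. A quick sketch to confirm the GPU masking:

```bash
# With CUDA_VISIBLE_DEVICES set to the empty string, torch sees no devices.
CUDA_VISIBLE_DEVICES= python -c "import torch; print(torch.cuda.is_available())"  # False
```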
README.md (498 changed lines)

````diff
@@ -1,45 +1,86 @@
 ![# LLaMA Factory](assets/logo.png)

 [](https://github.com/hiyouga/LLaMA-Factory/stargazers)
 [](LICENSE)
 [](https://github.com/hiyouga/LLaMA-Factory/commits/main)
 [](https://github.com/hiyouga/LLaMA-Factory/graphs/contributors)
 [](https://github.com/hiyouga/LLaMA-Factory/actions/workflows/tests.yml)
 [](https://pypi.org/project/llamafactory/)
 [](#projects-using-llama-factory)
 [](https://github.com/hiyouga/LLaMA-Factory/pulls)
-[](https://discord.gg/rKfvV9r9FK)
 [](https://scholar.google.com/scholar?cites=12620864006390196564)
 [](https://hub.docker.com/r/hiyouga/llamafactory/tags)

 [](https://twitter.com/llamafactory_ai)
+[](https://discord.gg/rKfvV9r9FK)
 [](https://gitcode.com/zhengyaowei/LLaMA-Factory)

 [](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)
 [](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)
-[](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
-[](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
 [](https://docs.alayanew.com/docs/documents/newActivities/llamafactory/?utm_source=LLaMA-Factory)
+[](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
+[](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
 [](https://novita.ai/templates-library/105981?sharer=88115474-394e-4bda-968e-b88e123d0c47)

 [](https://trendshift.io/repositories/4535)
+### Used by [Amazon](https://aws.amazon.com/cn/blogs/machine-learning/how-apoidea-group-enhances-visual-information-extraction-from-banking-documents-with-multimodal-models-using-llama-factory-on-amazon-sagemaker-hyperpod/), [NVIDIA](https://developer.nvidia.com/rtx/ai-toolkit), [Aliyun](https://help.aliyun.com/zh/pai/use-cases/fine-tune-a-llama-3-model-with-llama-factory), etc.

-👋 Join our [WeChat](assets/wechat.jpg) or [NPU user group](assets/wechat_npu.jpg).
+<div align="center" markdown="1">
+
+### Supporters ❤️
+
+<a href="https://warp.dev/llama-factory">
+    <img alt="Warp sponsorship" width="400" src="https://github.com/user-attachments/assets/ab8dd143-b0fd-4904-bdc5-dd7ecac94eae">
+</a>
+
+#### [Warp, the agentic terminal for developers](https://warp.dev/llama-factory)
+
+[Available for MacOS, Linux, & Windows](https://warp.dev/llama-factory)
+
+----
+
+### Easily fine-tune 100+ large language models with zero-code [CLI](#quickstart) and [Web UI](#fine-tuning-with-llama-board-gui-powered-by-gradio)
+
+
+
+</div>
+
+👋 Join our [WeChat group](assets/wechat.jpg), [NPU user group](assets/wechat_npu.jpg) or [Alaya NeW user group](assets/wechat_alaya.png).

 \[ English | [中文](README_zh.md) \]

 **Fine-tuning a large language model can be easy as...**

-https://github.com/user-attachments/assets/7c96b465-9df7-45f4-8053-bf03e58386d3
+https://github.com/user-attachments/assets/3991a3a8-4276-4d30-9cab-4cb0c4b9b99e

 Choose your path:

-- **Colab**: https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing
-- **PAI-DSW**: https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
-- **Documentation (WIP)**: https://llamafactory.readthedocs.io/en/latest/
-- **Documentation (AMD GPU)**: https://rocm.docs.amd.com/projects/ai-developer-hub/en/latest/notebooks/fine_tune/llama_factory_llama3.html
+- **Colab (free)**: https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing
+- **Local machine**: Please refer to [usage](#getting-started)
+- **Documentation (WIP)**: https://llamafactory.readthedocs.io/zh-cn/latest/
+- **PAI-DSW (free trial)**: https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
+- **Alaya NeW (cloud GPU deal)**: https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory

 > [!NOTE]
 > Except for the above links, all other websites are unauthorized third-party websites. Please carefully use them.

 ## Table of Contents

 - [Features](#features)
-- [Benchmark](#benchmark)
+- [Blogs](#blogs)
 - [Changelog](#changelog)
 - [Supported Models](#supported-models)
 - [Supported Training Approaches](#supported-training-approaches)
 - [Provided Datasets](#provided-datasets)
 - [Requirement](#requirement)
 - [Getting Started](#getting-started)
   - [Installation](#installation)
   - [Data Preparation](#data-preparation)
   - [Quickstart](#quickstart)
   - [Fine-Tuning with LLaMA Board GUI](#fine-tuning-with-llama-board-gui-powered-by-gradio)
   - [Build Docker](#build-docker)
   - [Deploy with OpenAI-style API and vLLM](#deploy-with-openai-style-api-and-vllm)
   - [Download from ModelScope Hub](#download-from-modelscope-hub)
   - [Download from Modelers Hub](#download-from-modelers-hub)
   - [Use W&B Logger](#use-wb-logger)
   - [Use SwanLab Logger](#use-swanlab-logger)
 - [Projects using LLaMA Factory](#projects-using-llama-factory)
 - [License](#license)
 - [Citation](#citation)
````
````diff
@@ -47,42 +88,94 @@ Choose your path:

 ## Features

-- **Various models**: LLaMA, LLaVA, Mistral, Mixtral-MoE, Qwen, Qwen2-VL, Yi, Gemma, Baichuan, ChatGLM, Phi, etc.
+- **Various models**: LLaMA, LLaVA, Mistral, Mixtral-MoE, Qwen, Qwen2-VL, DeepSeek, Yi, Gemma, ChatGLM, Phi, etc.
 - **Integrated methods**: (Continuous) pre-training, (multimodal) supervised fine-tuning, reward modeling, PPO, DPO, KTO, ORPO, etc.
 - **Scalable resources**: 16-bit full-tuning, freeze-tuning, LoRA and 2/3/4/5/6/8-bit QLoRA via AQLM/AWQ/GPTQ/LLM.int8/HQQ/EETQ.
-- **Advanced algorithms**: [GaLore](https://github.com/jiaweizzhao/GaLore), [BAdam](https://github.com/Ledzy/BAdam), [Adam-mini](https://github.com/zyushun/Adam-mini), DoRA, LongLoRA, LLaMA Pro, Mixture-of-Depths, LoRA+, LoftQ, PiSSA and Agent tuning.
+- **Advanced algorithms**: [GaLore](https://github.com/jiaweizzhao/GaLore), [BAdam](https://github.com/Ledzy/BAdam), [APOLLO](https://github.com/zhuhanqing/APOLLO), [Adam-mini](https://github.com/zyushun/Adam-mini), [Muon](https://github.com/KellerJordan/Muon), DoRA, LongLoRA, LLaMA Pro, Mixture-of-Depths, LoRA+, LoftQ and PiSSA.
 - **Practical tricks**: [FlashAttention-2](https://github.com/Dao-AILab/flash-attention), [Unsloth](https://github.com/unslothai/unsloth), [Liger Kernel](https://github.com/linkedin/Liger-Kernel), RoPE scaling, NEFTune and rsLoRA.
-- **Experiment monitors**: LlamaBoard, TensorBoard, Wandb, MLflow, etc.
-- **Faster inference**: OpenAI-style API, Gradio UI and CLI with vLLM worker.
+- **Wide tasks**: Multi-turn dialogue, tool using, image understanding, visual grounding, video recognition, audio understanding, etc.
+- **Experiment monitors**: LlamaBoard, TensorBoard, Wandb, MLflow, [SwanLab](https://github.com/SwanHubX/SwanLab), etc.
+- **Faster inference**: OpenAI-style API, Gradio UI and CLI with [vLLM worker](https://github.com/vllm-project/vllm) or [SGLang worker](https://github.com/sgl-project/sglang).

-## Benchmark
+### Day-N Support for Fine-Tuning Cutting-Edge Models

-Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning), LLaMA Factory's LoRA tuning offers up to **3.7 times faster** training speed with a better Rouge score on the advertising text generation task. By leveraging 4-bit quantization technique, LLaMA Factory's QLoRA further improves the efficiency regarding the GPU memory.
+| Support Date | Model Name                                                           |
+| ------------ | -------------------------------------------------------------------- |
+| Day 0        | Qwen3 / Qwen2.5-VL / Gemma 3 / GLM-4.1V / InternLM 3 / MiniCPM-o-2.6 |
+| Day 1        | Llama 3 / GLM-4 / Mistral Small / PaliGemma2 / Llama 4               |

-![benchmark](assets/benchmark.svg)
+## Blogs

-<details><summary>Definitions</summary>
+- [Fine-tune Llama3.1-70B for Medical Diagnosis using LLaMA-Factory](https://docs.alayanew.com/docs/documents/bestPractice/bigModel/llama70B/) (Chinese)
+- [A One-Stop Code-Free Model Reinforcement Learning and Deployment Platform based on LLaMA-Factory and EasyR1](https://aws.amazon.com/cn/blogs/china/building-llm-model-hub-based-on-llamafactory-and-easyr1/) (Chinese)
+- [How Apoidea Group enhances visual information extraction from banking documents with multimodal models using LLaMA-Factory on Amazon SageMaker HyperPod](https://aws.amazon.com/cn/blogs/machine-learning/how-apoidea-group-enhances-visual-information-extraction-from-banking-documents-with-multimodal-models-using-llama-factory-on-amazon-sagemaker-hyperpod/) (English)
+- [Easy Dataset × LLaMA Factory: Enabling LLMs to Efficiently Learn Domain Knowledge](https://buaa-act.feishu.cn/wiki/GVzlwYcRFiR8OLkHbL6cQpYin7g) (English)

-- **Training Speed**: the number of training samples processed per second during the training. (bs=4, cutoff_len=1024)
-- **Rouge Score**: Rouge-2 score on the development set of the [advertising text generation](https://aclanthology.org/D19-1321.pdf) task. (bs=4, cutoff_len=1024)
-- **GPU Memory**: Peak GPU memory usage in 4-bit quantized training. (bs=1, cutoff_len=1024)
-- We adopt `pre_seq_len=128` for ChatGLM's P-Tuning and `lora_rank=32` for LLaMA Factory's LoRA tuning.
+<details><summary>All Blogs</summary>

+- [Fine-tune Qwen2.5-VL for Autonomous Driving using LLaMA-Factory](https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory) (Chinese)
+- [LLaMA Factory: Fine-tuning the DeepSeek-R1-Distill-Qwen-7B Model for News Classifier](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_deepseek_r1_distill_7b) (Chinese)
+- [A One-Stop Code-Free Model Fine-Tuning \& Deployment Platform based on SageMaker and LLaMA-Factory](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/) (Chinese)
+- [LLaMA Factory Multi-Modal Fine-Tuning Practice: Fine-Tuning Qwen2-VL for Personal Tourist Guide](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_qwen2vl) (Chinese)
+- [LLaMA Factory: Fine-tuning Llama3 for Role-Playing](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory) (Chinese)

 </details>

 ## Changelog

-[24/08/30] We support fine-tuning the **[Qwen2-VL](https://qwenlm.github.io/blog/qwen2-vl/)** models. Thank [@simonJJJ](https://github.com/simonJJJ)'s PR.
+[25/07/02] We supported fine-tuning the **[GLM-4.1V-9B-Thinking](https://github.com/THUDM/GLM-4.1V-Thinking)** model. Please install transformers from **main** branch to use.

-[24/08/27] We support **[Liger Kernel](https://github.com/linkedin/Liger-Kernel)**. Try `enable_liger_kernel: true` for efficient training.
+[25/04/28] We supported fine-tuning the **[Qwen3](https://qwenlm.github.io/blog/qwen3/)** model family.

-[24/08/09] We support **[Adam-mini](https://github.com/zyushun/Adam-mini)** optimizer. See [examples](examples/README.md) for usage. Thank [@relic-yuexi](https://github.com/relic-yuexi)'s PR.
+[25/04/21] We supported the **[Muon](https://github.com/KellerJordan/Muon)** optimizer. See [examples](examples/README.md) for usage. Thank [@tianshijing](https://github.com/tianshijing)'s PR.
+
+[25/04/16] We supported fine-tuning the **[InternVL3](https://huggingface.co/OpenGVLab/InternVL3-8B)** model. See [PR #7258](https://github.com/hiyouga/LLaMA-Factory/pull/7258) to get started.
+
+[25/04/14] We supported fine-tuning the **[GLM-Z1](https://huggingface.co/THUDM/GLM-Z1-9B-0414)** and **[Kimi-VL](https://huggingface.co/moonshotai/Kimi-VL-A3B-Instruct)** models.
+
+[25/04/06] We supported fine-tuning the **[Llama 4](https://ai.meta.com/blog/llama-4-multimodal-intelligence/)** model. See [PR #7611](https://github.com/hiyouga/LLaMA-Factory/pull/7611) to get started.

 <details><summary>Full Changelog</summary>

-[24/07/04] We support [contamination-free packed training](https://github.com/MeetKai/functionary/tree/main/functionary/train/packing). Use `neat_packing: true` to activate it. Thank [@chuan298](https://github.com/chuan298)'s PR.
+[25/03/31] We supported fine-tuning the **[Qwen2.5 Omni](https://qwenlm.github.io/blog/qwen2.5-omni/)** model. See [PR #7537](https://github.com/hiyouga/LLaMA-Factory/pull/7537) to get started.

-[24/06/16] We support **[PiSSA](https://arxiv.org/abs/2404.02948)** algorithm. See [examples](examples/README.md) for usage.
+[25/03/15] We supported **[SGLang](https://github.com/sgl-project/sglang)** as inference backend. Try `infer_backend: sglang` to accelerate inference.
+
+[25/03/12] We supported fine-tuning the **[Gemma 3](https://huggingface.co/blog/gemma3)** model.
+
+[25/02/24] Announcing **[EasyR1](https://github.com/hiyouga/EasyR1)**, an efficient, scalable and multi-modality RL training framework for efficient GRPO training.
+
+[25/02/11] We supported saving the **[Ollama](https://github.com/ollama/ollama)** modelfile when exporting the model checkpoints. See [examples](examples/README.md) for usage.
+
+[25/02/05] We supported fine-tuning the **[Qwen2-Audio](Qwen/Qwen2-Audio-7B-Instruct)** and **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** on audio understanding tasks.
+
+[25/01/31] We supported fine-tuning the **[DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1)** and **[Qwen2.5-VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct)** models.
+
+[25/01/15] We supported **[APOLLO](https://arxiv.org/abs/2412.05270)** optimizer. See [examples](examples/README.md) for usage.
+
+[25/01/14] We supported fine-tuning the **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** and **[MiniCPM-V-2.6](https://huggingface.co/openbmb/MiniCPM-V-2_6)** models. Thank [@BUAADreamer](https://github.com/BUAADreamer)'s PR.
+
+[25/01/14] We supported fine-tuning the **[InternLM 3](https://huggingface.co/collections/internlm/)** models. Thank [@hhaAndroid](https://github.com/hhaAndroid)'s PR.
+
+[25/01/10] We supported fine-tuning the **[Phi-4](https://huggingface.co/microsoft/phi-4)** model.
+
+[24/12/21] We supported using **[SwanLab](https://github.com/SwanHubX/SwanLab)** for experiment tracking and visualization. See [this section](#use-swanlab-logger) for details.
+
+[24/11/27] We supported fine-tuning the **[Skywork-o1](https://huggingface.co/Skywork/Skywork-o1-Open-Llama-3.1-8B)** model and the **[OpenO1](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)** dataset.
+
+[24/10/09] We supported downloading pre-trained models and datasets from the **[Modelers Hub](https://modelers.cn/models)**. See [this tutorial](#download-from-modelers-hub) for usage.
+
+[24/09/19] We supported fine-tuning the **[Qwen2.5](https://qwenlm.github.io/blog/qwen2.5/)** models.
+
+[24/08/30] We supported fine-tuning the **[Qwen2-VL](https://qwenlm.github.io/blog/qwen2-vl/)** models. Thank [@simonJJJ](https://github.com/simonJJJ)'s PR.
+
+[24/08/27] We supported **[Liger Kernel](https://github.com/linkedin/Liger-Kernel)**. Try `enable_liger_kernel: true` for efficient training.
+
+[24/08/09] We supported **[Adam-mini](https://github.com/zyushun/Adam-mini)** optimizer. See [examples](examples/README.md) for usage. Thank [@relic-yuexi](https://github.com/relic-yuexi)'s PR.
+
+[24/07/04] We supported [contamination-free packed training](https://github.com/MeetKai/functionary/tree/main/functionary/train/packing). Use `neat_packing: true` to activate it. Thank [@chuan298](https://github.com/chuan298)'s PR.
+
+[24/06/16] We supported **[PiSSA](https://arxiv.org/abs/2404.02948)** algorithm. See [examples](examples/README.md) for usage.
+
+[24/06/07] We supported fine-tuning the **[Qwen2](https://qwenlm.github.io/blog/qwen2/)** and **[GLM-4](https://github.com/THUDM/GLM-4)** models.
````
````diff
@@ -128,7 +221,7 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning)

 [23/12/12] We supported fine-tuning the latest MoE model **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)** in our framework. See hardware requirement [here](#hardware-requirement).

-[23/12/01] We supported downloading pre-trained models and datasets from the **[ModelScope Hub](https://modelscope.cn/models)** for Chinese mainland users. See [this tutorial](#download-from-modelscope-hub) for usage.
+[23/12/01] We supported downloading pre-trained models and datasets from the **[ModelScope Hub](https://modelscope.cn/models)**. See [this tutorial](#download-from-modelscope-hub) for usage.

 [23/10/21] We supported **[NEFTune](https://arxiv.org/abs/2310.05914)** trick for fine-tuning. Try `neftune_noise_alpha: 5` argument to activate NEFTune.
````
@ -158,41 +251,78 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/
|
||||
|
||||
</details>
|
||||
|
||||
> [!TIP]
|
||||
> If you cannot use the latest feature, please pull the latest code and install LLaMA-Factory again.
|
||||
|
||||
## Supported Models
|
||||
|
||||
| Model | Model size | Template |
|
||||
| ----------------------------------------------------------------- | -------------------------------- | --------- |
|
||||
| [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
|
||||
| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
|
||||
| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
|
||||
| [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
|
||||
| [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
|
||||
| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
|
||||
| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma |
|
||||
| [GLM-4](https://huggingface.co/THUDM) | 9B | glm4 |
|
||||
| [InternLM2/InternLM2.5](https://huggingface.co/internlm) | 7B/20B | intern2 |
|
||||
| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
|
||||
| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
|
||||
| [Llama 3/Llama 3.1](https://huggingface.co/meta-llama) | 8B/70B | llama3 |
|
||||
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
|
||||
| [MiniCPM](https://huggingface.co/openbmb) | 1B/2B/4B | cpm/cpm3 |
|
||||
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
|
||||
| [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
|
||||
| [PaliGemma](https://huggingface.co/google) | 3B | paligemma |
|
||||
| [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
|
||||
| [Phi-3](https://huggingface.co/microsoft) | 4B/7B/14B | phi |
|
||||
| [Qwen/Qwen1.5/Qwen2 (Code/Math/MoE)](https://huggingface.co/Qwen) | 0.5B/1.5B/4B/7B/14B/32B/72B/110B | qwen |
|
||||
| [Qwen2-VL](https://huggingface.co/Qwen) | 2B/7B | qwen2_vl |
|
||||
| [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
|
||||
| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
|
||||
| [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi |
|
||||
| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
|
||||
| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |
|
||||
| Model | Model size | Template |
|
||||
| ----------------------------------------------------------------- | -------------------------------- | ------------------- |
|
||||
| [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
| [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
| [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
| [DeepSeek 2.5/3](https://huggingface.co/deepseek-ai) | 236B/671B | deepseek3 |
| [DeepSeek R1 (Distill)](https://huggingface.co/deepseek-ai) | 1.5B/7B/8B/14B/32B/70B/671B | deepseekr1 |
| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
| [Falcon-H1](https://huggingface.co/tiiuae) | 0.5B/1.5B/3B/7B/34B | falcon_h1 |
| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma/gemma2 |
| [Gemma 3/Gemma 3n](https://huggingface.co/google) | 1B/4B/6B/8B/12B/27B | gemma3/gemma3n |
| [GLM-4/GLM-4-0414/GLM-Z1](https://huggingface.co/zai-org) | 9B/32B | glm4/glmz1 |
| [GLM-4.1V](https://huggingface.co/zai-org)* | 9B | glm4v |
| [GLM-4.5](https://huggingface.co/zai-org)* | 106B/355B | glm4_moe |
| [GPT-2](https://huggingface.co/openai-community) | 0.1B/0.4B/0.8B/1.5B | - |
| [Granite 3.0-3.3](https://huggingface.co/ibm-granite) | 1B/2B/3B/8B | granite3 |
| [Granite 4](https://huggingface.co/ibm-granite) | 7B | granite4 |
| [Hunyuan](https://huggingface.co/tencent/) | 7B | hunyuan |
| [Index](https://huggingface.co/IndexTeam) | 1.9B | index |
| [InternLM 2-3](https://huggingface.co/internlm) | 7B/8B/20B | intern2 |
| [InternVL 2.5-3](https://huggingface.co/OpenGVLab) | 1B/2B/8B/14B/38B/78B | intern_vl |
| [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl |
| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
| [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
| [Llama 4](https://huggingface.co/meta-llama) | 109B/402B | llama4 |
| [Llama 3.2 Vision](https://huggingface.co/meta-llama) | 11B/90B | mllama |
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
| [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/8B/13B/34B/72B/110B | llava_next |
| [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/34B | llava_next_video |
| [MiMo](https://huggingface.co/XiaomiMiMo) | 7B | mimo |
| [MiniCPM](https://huggingface.co/openbmb) | 0.5B/1B/2B/4B/8B | cpm/cpm3/cpm4 |
| [MiniCPM-o-2.6/MiniCPM-V-2.6](https://huggingface.co/openbmb) | 8B | minicpm_o/minicpm_v |
| [Ministral/Mistral-Nemo](https://huggingface.co/mistralai) | 8B/12B | ministral |
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
| [Mistral Small](https://huggingface.co/mistralai) | 24B | mistral_small |
| [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
| [PaliGemma/PaliGemma2](https://huggingface.co/google) | 3B/10B/28B | paligemma |
| [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
| [Phi-3/Phi-3.5](https://huggingface.co/microsoft) | 4B/14B | phi |
| [Phi-3-small](https://huggingface.co/microsoft) | 7B | phi_small |
| [Phi-4](https://huggingface.co/microsoft) | 14B | phi4 |
| [Pixtral](https://huggingface.co/mistralai) | 12B | pixtral |
| [Qwen (1-2.5) (Code/Math/MoE/QwQ)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen |
| [Qwen3 (MoE)](https://huggingface.co/Qwen) | 0.6B/1.7B/4B/8B/14B/32B/235B | qwen3 |
| [Qwen2-Audio](https://huggingface.co/Qwen) | 7B | qwen2_audio |
| [Qwen2.5-Omni](https://huggingface.co/Qwen) | 3B/7B | qwen2_omni |
| [Qwen2-VL/Qwen2.5-VL/QVQ](https://huggingface.co/Qwen) | 2B/3B/7B/32B/72B | qwen2_vl |
| [Seed Coder](https://huggingface.co/ByteDance-Seed) | 8B | seed_coder |
| [Skywork o1](https://huggingface.co/Skywork) | 8B | skywork_o1 |
| [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
| [TeleChat2](https://huggingface.co/Tele-AI) | 3B/7B/35B/115B | telechat2 |
| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
| [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi |
| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |

> [!NOTE]
> For the "base" models, the `template` argument can be chosen from `default`, `alpaca`, `vicuna`, etc. But make sure to use the **corresponding template** for the "instruct/chat" models.
>
> Remember to use the **SAME** template in training and inference.
>
> \*: You should install `transformers` from the main branch and use `DISABLE_VERSION_CHECK=1` to skip the version check.
>
> \*\*: You need to install a specific version of `transformers` to use the corresponding model.

Please refer to [constants.py](src/llamafactory/extras/constants.py) for a full list of models we support.
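For instance, an instruct model is paired with its matching template in the training yaml (an illustrative snippet; the model ID is an assumed example):

```yaml
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
template: llama3
```
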
@@ -271,9 +401,13 @@ You also can add a custom chat template to [template.py](src/llamafactory/data/t

- [STEM (zh)](https://huggingface.co/datasets/hfl/stem_zh_instruction)
- [Ruozhiba (zh)](https://huggingface.co/datasets/hfl/ruozhiba_gpt4_turbo)
- [Neo-sft (zh)](https://huggingface.co/datasets/m-a-p/neo_sft_phase2)
- [WebInstructSub (en)](https://huggingface.co/datasets/TIGER-Lab/WebInstructSub)
- [Magpie-Pro-300K-Filtered (en)](https://huggingface.co/datasets/Magpie-Align/Magpie-Pro-300K-Filtered)
- [Magpie-ultra-v0.1 (en)](https://huggingface.co/datasets/argilla/magpie-ultra-v0.1)
- [WebInstructSub (en)](https://huggingface.co/datasets/TIGER-Lab/WebInstructSub)
- [OpenO1-SFT (en&zh)](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)
- [Open-Thoughts (en)](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k)
- [Open-R1-Math (en)](https://huggingface.co/datasets/open-r1/OpenR1-Math-220k)
- [Chinese-DeepSeek-R1-Distill (zh)](https://huggingface.co/datasets/Congliu/Chinese-DeepSeek-R1-Distill-data-110k-SFT)
- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k)
- [Pokemon-gpt4o-captions (en&zh)](https://huggingface.co/datasets/jugg1024/pokemon-gpt4o-captions)
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
@@ -292,8 +426,10 @@ You also can add a custom chat template to [template.py](src/llamafactory/data/t

- [DPO mixed (en&zh)](https://huggingface.co/datasets/hiyouga/DPO-En-Zh-20k)
- [UltraFeedback (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized)
- [COIG-P (zh)](https://huggingface.co/datasets/m-a-p/COIG-P)
- [RLHF-V (en)](https://huggingface.co/datasets/openbmb/RLHF-V-Dataset)
- [VLFeedback (en)](https://huggingface.co/datasets/Zhihui/VLFeedback)
- [RLAIF-V (en)](https://huggingface.co/datasets/openbmb/RLAIF-V-Dataset)
- [Orca DPO Pairs (en)](https://huggingface.co/datasets/Intel/orca_dpo_pairs)
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
@@ -313,35 +449,35 @@ huggingface-cli login

| Mandatory | Minimum | Recommend |
| ------------ | ------- | --------- |
| python | 3.8 | 3.11 |
| torch | 1.13.1 | 2.4.0 |
| transformers | 4.41.2 | 4.43.4 |
| datasets | 2.16.0 | 2.20.0 |
| accelerate | 0.30.1 | 0.32.0 |
| peft | 0.11.1 | 0.12.0 |
| python | 3.9 | 3.10 |
| torch | 2.0.0 | 2.6.0 |
| torchvision | 0.15.0 | 0.21.0 |
| transformers | 4.49.0 | 4.50.0 |
| datasets | 2.16.0 | 3.2.0 |
| accelerate | 0.34.0 | 1.2.1 |
| peft | 0.14.0 | 0.15.1 |
| trl | 0.8.6 | 0.9.6 |

| Optional | Minimum | Recommend |
| ------------ | ------- | --------- |
| CUDA | 11.6 | 12.2 |
| deepspeed | 0.10.0 | 0.14.0 |
| deepspeed | 0.10.0 | 0.16.4 |
| bitsandbytes | 0.39.0 | 0.43.1 |
| vllm | 0.4.3 | 0.5.0 |
| flash-attn | 2.3.0 | 2.6.3 |
| vllm | 0.4.3 | 0.8.2 |
| flash-attn | 2.5.6 | 2.7.2 |
### Hardware Requirement

\* *estimated*

| Method | Bits | 7B | 13B | 30B | 70B | 110B | 8x7B | 8x22B |
| ----------------- | ---- | ----- | ----- | ----- | ------ | ------ | ----- | ------ |
| Full | AMP | 120GB | 240GB | 600GB | 1200GB | 2000GB | 900GB | 2400GB |
| Full | 16 | 60GB | 120GB | 300GB | 600GB | 900GB | 400GB | 1200GB |
| Freeze | 16 | 20GB | 40GB | 80GB | 200GB | 360GB | 160GB | 400GB |
| LoRA/GaLore/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | 240GB | 120GB | 320GB |
| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | 140GB | 60GB | 160GB |
| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | 72GB | 30GB | 96GB |
| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | 48GB | 18GB | 48GB |

| Method | Bits | 7B | 14B | 30B | 70B | `x`B |
| ------------------------------- | ---- | ----- | ----- | ----- | ------ | ------- |
| Full (`bf16` or `fp16`) | 32 | 120GB | 240GB | 600GB | 1200GB | `18x`GB |
| Full (`pure_bf16`) | 16 | 60GB | 120GB | 300GB | 600GB | `8x`GB |
| Freeze/LoRA/GaLore/APOLLO/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | `2x`GB |
| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | `x`GB |
| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | `x/2`GB |
| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | `x/4`GB |
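As a worked example of the second table's rules of thumb: a hypothetical 40B dense model would need roughly 18 × 40 ≈ 720GB for full AMP training, 2 × 40 = 80GB for 16-bit LoRA, and 40 / 2 = 20GB for 4-bit QLoRA. As the asterisk notes, these are estimates rather than exact figures.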
## Getting Started

@@ -350,53 +486,99 @@ huggingface-cli login

> [!IMPORTANT]
> Installation is mandatory.

#### Install from Source

```bash
git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git
cd LLaMA-Factory
pip install -e ".[torch,metrics]"
pip install -e ".[torch,metrics]" --no-build-isolation
```

Extra dependencies available: torch, torch-npu, metrics, deepspeed, liger-kernel, bitsandbytes, hqq, eetq, gptq, awq, aqlm, vllm, galore, badam, adam-mini, qwen, modelscope, quality
Extra dependencies available: torch, torch-npu, metrics, deepspeed, liger-kernel, bitsandbytes, hqq, eetq, gptq, aqlm, vllm, sglang, galore, apollo, badam, adam-mini, qwen, minicpm_v, openmind, swanlab, dev

> [!TIP]
> Use `pip install --no-deps -e .` to resolve package conflicts.
#### Install from Docker Image

```bash
docker run -it --rm --gpus=all --ipc=host hiyouga/llamafactory:latest
```

This image is built on Ubuntu 22.04 (x86\_64), CUDA 12.4, Python 3.11, PyTorch 2.6.0, and Flash-attn 2.7.4.

Find the pre-built images: https://hub.docker.com/r/hiyouga/llamafactory/tags

Please refer to [build docker](#build-docker) to build the image yourself.
<details><summary>Setting up a virtual environment with <b>uv</b></summary>

Create an isolated Python environment with [uv](https://github.com/astral-sh/uv):

```bash
uv sync --extra torch --extra metrics --prerelease=allow
```

Run LLaMA-Factory in the isolated environment:

```bash
uv run --prerelease=allow llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml
```

</details>
<details><summary>For Windows users</summary>

#### Install PyTorch

You need to manually install the GPU version of PyTorch on the Windows platform. Please refer to the [official website](https://pytorch.org/get-started/locally/) and the following commands to install PyTorch with CUDA support:

```bash
pip uninstall torch torchvision torchaudio
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu126
python -c "import torch; print(torch.cuda.is_available())"
```

If you see `True`, you have successfully installed PyTorch with CUDA support.

Try `dataloader_num_workers: 0` if you encounter a `Can't pickle local object` error.

#### Install BitsAndBytes

If you want to enable quantized LoRA (QLoRA) on the Windows platform, you need to install a pre-built version of the `bitsandbytes` library, which supports CUDA 11.1 to 12.2. Please select the appropriate [release version](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels) based on your CUDA version.

```bash
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.2.post2-py3-none-win_amd64.whl
```

To enable FlashAttention-2 on the Windows platform, you need to install the precompiled `flash-attn` library, which supports CUDA 12.1 to 12.2. Please download the corresponding version from [flash-attention](https://github.com/bdashore3/flash-attention/releases) based on your requirements.

#### Install Flash Attention-2

To enable FlashAttention-2 on the Windows platform, please use the script from [flash-attention-windows-wheel](https://huggingface.co/lldacing/flash-attention-windows-wheel) to compile and install it yourself.

</details>
<details><summary>For Ascend NPU users</summary>

To install LLaMA Factory on Ascend NPU devices, please specify extra dependencies: `pip install -e ".[torch-npu,metrics]"`. Additionally, you need to install the **[Ascend CANN Toolkit and Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**. Please follow the [installation tutorial](https://www.hiascend.com/document/detail/en/CANNCommunityEdition/600alphaX/softwareinstall/instg/atlasdeploy_03_0031.html) or use the following commands:
To install LLaMA Factory on Ascend NPU devices, please upgrade Python to version 3.10 or higher and specify extra dependencies: `pip install -e ".[torch-npu,metrics]"`. Additionally, you need to install the **[Ascend CANN Toolkit and Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**. Please follow the [installation tutorial](https://www.hiascend.com/document/detail/en/CANNCommunityEdition/600alphaX/softwareinstall/instg/atlasdeploy_03_0031.html) or use the following commands:

```bash
# replace the url according to your CANN version and devices
# install CANN Toolkit
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run
bash Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run --install
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C20SPC702/Ascend-cann-toolkit_8.0.0.alpha002_linux-"$(uname -i)".run
bash Ascend-cann-toolkit_8.0.0.alpha002_linux-"$(uname -i)".run --install

# install CANN Kernels
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run
bash Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run --install
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C20SPC702/Ascend-cann-kernels-910b_8.0.0.alpha002_linux-"$(uname -i)".run
bash Ascend-cann-kernels-910b_8.0.0.alpha002_linux-"$(uname -i)".run --install

# set env variables
source /usr/local/Ascend/ascend-toolkit/set_env.sh
```
| Requirement | Minimum | Recommend |
| ------------ | ------- | ----------- |
| CANN | 8.0.RC1 | 8.0.RC1 |
| torch | 2.1.0 | 2.1.0 |
| torch-npu | 2.1.0 | 2.1.0.post3 |
| deepspeed | 0.13.2 | 0.13.2 |

| Requirement | Minimum | Recommend |
| ------------ | ------- | -------------- |
| CANN | 8.0.RC1 | 8.0.0.alpha002 |
| torch | 2.1.0 | 2.4.0 |
| torch-npu | 2.1.0 | 2.4.0.post2 |
| deepspeed | 0.13.2 | 0.13.2 |
| vllm-ascend | - | 0.7.3 |

Remember to use `ASCEND_RT_VISIBLE_DEVICES` instead of `CUDA_VISIBLE_DEVICES` to specify the device to use.
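A short sketch of what that looks like, assuming the stock pretraining example config, to train on the first two NPUs:

```bash
ASCEND_RT_VISIBLE_DEVICES=0,1 llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml
```
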
@@ -404,15 +586,51 @@ If you cannot infer model on NPU devices, try setting `do_sample: false` in the

Download the pre-built Docker images: [32GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/130.html) | [64GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/131.html)
#### Install BitsAndBytes

To use QLoRA based on bitsandbytes on Ascend NPU, please follow these 3 steps:

1. Manually compile bitsandbytes: Refer to [the installation documentation](https://huggingface.co/docs/bitsandbytes/installation?backend=Ascend+NPU&platform=Ascend+NPU) for the NPU version of bitsandbytes to complete the compilation and installation. The compilation requires cmake version 3.22.1 or later and g++ version 12.x or later.

```bash
# Install bitsandbytes from source
# Clone the bitsandbytes repo; the Ascend NPU backend is currently enabled on the multi-backend-refactor branch
git clone -b multi-backend-refactor https://github.com/bitsandbytes-foundation/bitsandbytes.git
cd bitsandbytes/

# Install dependencies
pip install -r requirements-dev.txt

# Install the compilation toolchain. The commands for this step may vary by operating system; the following are provided for reference
apt-get install -y build-essential cmake

# Compile & install
cmake -DCOMPUTE_BACKEND=npu -S .
make
pip install .
```

2. Install transformers from the main branch.

```bash
git clone -b main https://github.com/huggingface/transformers.git
cd transformers
pip install .
```

3. Set `double_quantization: false` in the configuration. You can refer to the [example](examples/train_qlora/llama3_lora_sft_bnb_npu.yaml).
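A minimal sketch of the relevant yaml lines (an assumed excerpt; the linked example contains the full configuration):

```yaml
quantization_bit: 4
double_quantization: false
```
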

</details>
### Data Preparation

Please refer to [data/README.md](data/README.md) for details about the format of dataset files. You can either use datasets on the HuggingFace / ModelScope hub or load a dataset from local disk.
Please refer to [data/README.md](data/README.md) for details about the format of dataset files. You can use datasets on the HuggingFace / ModelScope / Modelers hub, load a dataset from local disk, or specify a path to s3/gcs cloud storage.

> [!NOTE]
> Please update `data/dataset_info.json` to use your custom dataset, as sketched below.
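For instance, a local alpaca-style file could be registered with an entry like this (the dataset name, file name, and column mapping are illustrative; data/README.md documents the full schema):

```json
{
  "my_dataset": {
    "file_name": "my_dataset.json",
    "columns": {
      "prompt": "instruction",
      "query": "input",
      "response": "output"
    }
  }
}
```
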
You can also use **[Easy Dataset](https://github.com/ConardLi/easy-dataset)**, **[DataFlow](https://github.com/OpenDCAI/DataFlow)** and **[GraphGen](https://github.com/open-sciencelab/GraphGen)** to create synthetic data for fine-tuning.
### Quickstart

Use the following 3 commands to run LoRA **fine-tuning**, **inference** and **merging** of the Llama3-8B-Instruct model, respectively.
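The three commands themselves sit outside the visible hunk; a sketch of what they look like, assuming the stock example configs shipped in `examples/`:

```bash
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
```
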
@@ -427,6 +645,8 @@ See [examples/README.md](examples/README.md) for advanced usage (including distr

> [!TIP]
> Use `llamafactory-cli help` to show help information.
>
> Read the [FAQs](https://github.com/hiyouga/LLaMA-Factory/issues/4614) first if you encounter any problems.

### Fine-Tuning with LLaMA Board GUI (powered by [Gradio](https://github.com/gradio-app/gradio))
@@ -466,21 +686,13 @@ For CUDA users:

```bash
docker build -f ./docker/docker-cuda/Dockerfile \
    --build-arg INSTALL_BNB=false \
    --build-arg INSTALL_VLLM=false \
    --build-arg INSTALL_DEEPSPEED=false \
    --build-arg INSTALL_FLASHATTN=false \
    --build-arg PIP_INDEX=https://pypi.org/simple \
    --build-arg EXTRAS=metrics \
    -t llamafactory:latest .

docker run -dit --gpus=all \
    -v ./hf_cache:/root/.cache/huggingface \
    -v ./ms_cache:/root/.cache/modelscope \
    -v ./data:/app/data \
    -v ./output:/app/output \
docker run -dit --ipc=host --gpus=all \
    -p 7860:7860 \
    -p 8000:8000 \
    --shm-size 16G \
    --name llamafactory \
    llamafactory:latest
```
@@ -490,18 +702,12 @@ docker exec -it llamafactory bash

For Ascend NPU users:

```bash
# Choose docker image upon your environment
docker build -f ./docker/docker-npu/Dockerfile \
    --build-arg INSTALL_DEEPSPEED=false \
    --build-arg PIP_INDEX=https://pypi.org/simple \
    --build-arg EXTRAS=torch-npu,metrics \
    -t llamafactory:latest .

# Change `device` upon your resources
docker run -dit \
    -v ./hf_cache:/root/.cache/huggingface \
    -v ./ms_cache:/root/.cache/modelscope \
    -v ./data:/app/data \
    -v ./output:/app/output \
docker run -dit --ipc=host \
    -v /usr/local/dcmi:/usr/local/dcmi \
    -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
    -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
```

@@ -512,7 +718,6 @@ docker run -dit \

```bash
    --device /dev/davinci_manager \
    --device /dev/devmm_svm \
    --device /dev/hisi_hdc \
    --shm-size 16G \
    --name llamafactory \
    llamafactory:latest
```
@@ -523,24 +728,15 @@ For AMD ROCm users:

```bash
docker build -f ./docker/docker-rocm/Dockerfile \
    --build-arg INSTALL_BNB=false \
    --build-arg INSTALL_VLLM=false \
    --build-arg INSTALL_DEEPSPEED=false \
    --build-arg INSTALL_FLASHATTN=false \
    --build-arg PIP_INDEX=https://pypi.org/simple \
    --build-arg EXTRAS=metrics \
    -t llamafactory:latest .

docker run -dit \
    -v ./hf_cache:/root/.cache/huggingface \
    -v ./ms_cache:/root/.cache/modelscope \
    -v ./data:/app/data \
    -v ./output:/app/output \
    -v ./saves:/app/saves \
docker run -dit --ipc=host \
    -p 7860:7860 \
    -p 8000:8000 \
    --device /dev/kfd \
    --device /dev/dri \
    --shm-size 16G \
    --name llamafactory \
    llamafactory:latest
```
@@ -549,11 +745,14 @@ docker exec -it llamafactory bash

</details>

<details><summary>Details about volume</summary>
<details><summary>Use Docker volumes</summary>

- `hf_cache`: Utilize Hugging Face cache on the host machine. Reassignable if a cache already exists in a different directory.
- `ms_cache`: Similar to Hugging Face cache but for ModelScope users.
- `data`: Place datasets in this directory of the host machine so that they can be selected on the LLaMA Board GUI.
You can uncomment `VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]` in the Dockerfile to use data volumes.

When running the Docker container, use the `-v ./hf_cache:/root/.cache/huggingface` argument to mount the local directory into the container. The following data volumes are available.

- `hf_cache`: Utilize Hugging Face cache on the host machine.
- `shared_data`: The directory to store datasets on the host machine.
- `output`: Set the export dir to this location so that the merged result can be accessed directly on the host machine.

</details>
@@ -561,11 +760,13 @@ docker exec -it llamafactory bash

### Deploy with OpenAI-style API and vLLM

```bash
API_PORT=8000 llamafactory-cli api examples/inference/llama3_vllm.yaml
API_PORT=8000 llamafactory-cli api examples/inference/llama3.yaml infer_backend=vllm vllm_enforce_eager=true
```

> [!TIP]
> Visit [this page](https://platform.openai.com/docs/api-reference/chat/create) for the API documentation.
>
> Examples: [Image understanding](scripts/api_example/test_image.py) | [Function calling](scripts/api_example/test_toolcall.py)
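Once the server is running, any OpenAI-compatible client can call it. A minimal sketch with curl (the `model` value is a placeholder; adjust host and port to your deployment):

```bash
curl http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{"model": "default", "messages": [{"role": "user", "content": "Hello!"}]}'
```
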
### Download from ModelScope Hub

@@ -577,6 +778,16 @@ export USE_MODELSCOPE_HUB=1 # `set USE_MODELSCOPE_HUB=1` for Windows

Train the model by specifying a model ID of the ModelScope Hub as the `model_name_or_path`. You can find a full list of model IDs at [ModelScope Hub](https://modelscope.cn/models), e.g., `LLM-Research/Meta-Llama-3-8B-Instruct`.
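For example, combining the hub switch with the `key=value` override syntax used in the vLLM deployment above (a sketch; the example config path is illustrative):

```bash
export USE_MODELSCOPE_HUB=1
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml \
    model_name_or_path=LLM-Research/Meta-Llama-3-8B-Instruct
```
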
### Download from Modelers Hub

You can also use Modelers Hub to download models and datasets.

```bash
export USE_OPENMIND_HUB=1 # `set USE_OPENMIND_HUB=1` for Windows
```

Train the model by specifying a model ID of the Modelers Hub as the `model_name_or_path`. You can find a full list of model IDs at [Modelers Hub](https://modelers.cn/models), e.g., `TeleAI/TeleChat-7B-pt`.
### Use W&B Logger

To use [Weights & Biases](https://wandb.ai) for logging experimental results, you need to add the following arguments to yaml files.
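The yaml block itself falls between the hunks shown here; a minimal sketch, assuming the standard `report_to` argument:

```yaml
report_to: wandb
run_name: test_run # optional
```
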
@@ -588,6 +799,21 @@ run_name: test_run # optional

Set `WANDB_API_KEY` to [your key](https://wandb.ai/authorize) when launching training tasks to log in with your W&B account.
### Use SwanLab Logger

To use [SwanLab](https://github.com/SwanHubX/SwanLab) for logging experimental results, you need to add the following arguments to yaml files.

```yaml
use_swanlab: true
swanlab_run_name: test_run # optional
```

When launching training tasks, you can log in to SwanLab in three ways:

1. Add `swanlab_api_key=<your_api_key>` to the yaml file, and set it to your [API key](https://swanlab.cn/settings).
2. Set the environment variable `SWANLAB_API_KEY` to your [API key](https://swanlab.cn/settings).
3. Use the `swanlab login` command to complete the login.
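For instance, option 2 can be wired up like this (a sketch; the API key and config path are placeholders):

```bash
export SWANLAB_API_KEY=<your_api_key>
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
```
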
## Projects using LLaMA Factory

If you have a project that should be incorporated, please contact via email or create a pull request.

@@ -675,24 +901,30 @@ If you have a project that should be incorporated, please contact via email or c

1. Zeng et al. Perceive, Reflect, and Plan: Designing LLM Agent for Goal-Directed City Navigation without Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2408.04168)
1. Xia et al. Using Pre-trained Language Model for Accurate ESG Prediction. FinNLP 2024. [[paper]](https://aclanthology.org/2024.finnlp-2.1/)
1. Liang et al. I-SHEEP: Self-Alignment of LLM from Scratch through an Iterative Self-Enhancement Paradigm. 2024. [[arxiv]](https://arxiv.org/abs/2408.08072)
1. Bai et al. Aligning Large Language Model with Direct Multi-Preference Optimization for Recommendation. CIKM 2024. [[paper]](https://dl.acm.org/doi/10.1145/3627673.3679611)
1. Zhang et al. CPsyCoun: A Report-based Multi-turn Dialogue Reconstruction and Evaluation Framework for Chinese Psychological Counseling. ACL 2024. [[paper]](https://aclanthology.org/2024.findings-acl.830.pdf)
1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: A large language model for Astronomy, based on ChatGLM2-6B and Qwen-14B.
1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: A large language model specialized in the Chinese legal domain, based on Baichuan-13B, capable of retrieving and reasoning on legal knowledge.
1. **[Sunsimiao](https://github.com/X-D-Lab/Sunsimiao)**: A large language model specialized in the Chinese medical domain, based on Baichuan-7B and ChatGLM-6B.
1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: A series of large language models for the Chinese medical domain, based on LLaMA2-7B and Baichuan-13B.
1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**: A series of MBTI Personality large language models, capable of giving any LLM 16 different personality types based on different datasets and training methods.
1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**: A large language model specialized in generating metadata for stable diffusion. [[🤗Demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt)
1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**: A large language model specialized in generating metadata for stable diffusion. [[demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt)
1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**: A multimodal large language model specialized in the Chinese medical domain, based on LLaVA-1.5-7B.
1. **[AutoRE](https://github.com/THUDM/AutoRE)**: A document-level relation extraction system based on large language models.
1. **[NVIDIA RTX AI Toolkit](https://github.com/NVIDIA/RTX-AI-Toolkit)**: SDKs for fine-tuning LLMs on Windows PC for NVIDIA RTX.
1. **[LazyLLM](https://github.com/LazyAGI/LazyLLM)**: An easy and lazy way to build multi-agent LLM applications; it supports model fine-tuning via LLaMA Factory.
1. **[RAG-Retrieval](https://github.com/NLPJCL/RAG-Retrieval)**: A full pipeline for RAG retrieval model fine-tuning, inference, and distillation. [[blog]](https://zhuanlan.zhihu.com/p/987727357)
1. **[360-LLaMA-Factory](https://github.com/Qihoo360/360-LLaMA-Factory)**: A modified library that supports long sequence SFT & DPO using ring attention.
1. **[Sky-T1](https://novasky-ai.github.io/posts/sky-t1/)**: An o1-like model fine-tuned by NovaSky AI at very small cost.
1. **[WeClone](https://github.com/xming521/WeClone)**: One-stop solution for creating your digital avatar from chat logs.
1. **[EmoLLM](https://github.com/SmartFlowAI/EmoLLM)**: A project about large language models (LLMs) and mental health.

</details>
## License

This repository is licensed under the [Apache-2.0 License](LICENSE).

Please follow the model licenses to use the corresponding model weights: [Baichuan 2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [GLM-4](https://huggingface.co/THUDM/glm-4-9b/blob/main/LICENSE) / [InternLM2](https://github.com/InternLM/InternLM#license) / [Llama](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [Llama 2 (LLaVA-1.5)](https://ai.meta.com/llama/license/) / [Llama 3](https://llama.meta.com/llama3/license/) / [MiniCPM](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md) / [Mistral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/Phi-2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [StarCoder 2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yi-1.5](LICENSE) / [Yuan 2](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)
Please follow the model licenses to use the corresponding model weights: [Baichuan 2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [GLM-4](https://huggingface.co/THUDM/glm-4-9b/blob/main/LICENSE) / [GPT-2](https://github.com/openai/gpt-2/blob/master/LICENSE) / [Granite](LICENSE) / [Index](https://huggingface.co/IndexTeam/Index-1.9B/blob/main/LICENSE) / [InternLM](https://github.com/InternLM/InternLM#license) / [Llama](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [Llama 2](https://ai.meta.com/llama/license/) / [Llama 3](https://llama.meta.com/llama3/license/) / [Llama 4](https://github.com/meta-llama/llama-models/blob/main/models/llama4/LICENSE) / [MiniCPM](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md) / [Mistral/Mixtral/Pixtral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/Phi-2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3/Phi-4](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [Skywork](https://huggingface.co/Skywork/Skywork-13B-base/blob/main/Skywork%20Community%20License.pdf) / [StarCoder 2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [TeleChat2](https://huggingface.co/Tele-AI/telechat-7B/blob/main/TeleChat%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yi-1.5](LICENSE) / [Yuan 2](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)

## Citation

486 README_zh.md

@@ -1,46 +1,88 @@
[Stars](https://github.com/hiyouga/LLaMA-Factory/stargazers)
[License](LICENSE)
[Last commit](https://github.com/hiyouga/LLaMA-Factory/commits/main)
[Contributors](https://github.com/hiyouga/LLaMA-Factory/graphs/contributors)
[Tests](https://github.com/hiyouga/LLaMA-Factory/actions/workflows/tests.yml)
[PyPI](https://pypi.org/project/llamafactory/)
[Citation](#使用了-llama-factory-的项目)
[Pull requests](https://github.com/hiyouga/LLaMA-Factory/pulls)
[Discord](https://discord.gg/rKfvV9r9FK)
[Google Scholar](https://scholar.google.com/scholar?cites=12620864006390196564)
[Docker Hub](https://hub.docker.com/r/hiyouga/llamafactory/tags)

[Twitter](https://twitter.com/llamafactory_ai)
[Discord](https://discord.gg/rKfvV9r9FK)
[GitCode](https://gitcode.com/zhengyaowei/LLaMA-Factory)

[Open in Colab](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)
[Open in PAI-DSW](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)
[Open in Hugging Face Spaces](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
[Open in ModelScope Studio](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
[Open in Alaya NeW](https://docs.alayanew.com/docs/documents/newActivities/llamafactory/?utm_source=LLaMA-Factory)
[Open in Hugging Face Spaces](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
[Open in ModelScope Studio](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
[Open in Novita](https://novita.ai/templates-library/105981?sharer=88115474-394e-4bda-968e-b88e123d0c47)

[Trendshift](https://trendshift.io/repositories/4535)
### Used by [Amazon](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/), [NVIDIA](https://developer.nvidia.cn/rtx/ai-toolkit), [Aliyun](https://help.aliyun.com/zh/pai/use-cases/fine-tune-a-llama-3-model-with-llama-factory), etc.

👋 Join our [WeChat group](assets/wechat.jpg) or [NPU user group](assets/wechat_npu.jpg).
<div align="center" markdown="1">

### Sponsors ❤️

<a href="https://warp.dev/llama-factory">
    <img alt="Warp sponsorship" width="400" src="https://github.com/user-attachments/assets/ab8dd143-b0fd-4904-bdc5-dd7ecac94eae">
</a>

#### [Warp, the intelligent terminal for developers](https://warp.dev/llama-factory)

[Available for MacOS, Linux and Windows](https://warp.dev/llama-factory)

----

### Easily fine-tune 100+ large language models with zero-code [CLI](#快速开始) and [Web UI](#llama-board-可视化微调由-gradio-驱动)

[Trendshift](https://trendshift.io/repositories/4535)

</div>

👋 Join our [WeChat group](assets/wechat.jpg), [NPU user group](assets/wechat_npu.jpg) or [Alaya NeW compute-discount group](assets/wechat_alaya.png).

\[ [English](README.md) | 中文 \]
**Fine-tuning a large language model can be easy as...**

https://github.com/user-attachments/assets/e6ce34b0-52d5-4f3e-a830-592106c4c272
https://github.com/user-attachments/assets/43b700c6-a178-41db-b1f8-8190a5d3fcfc

Choose your path:

- **Colab**: https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing
- **PAI-DSW**: https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
- **Local machine**: Please refer to [usage](#如何使用)
- **Getting started tutorial**: https://zhuanlan.zhihu.com/p/695287607
- **Fine-tuning video tutorial**: https://www.bilibili.com/video/BV1djgRzxEts/
- **Documentation**: https://llamafactory.readthedocs.io/zh-cn/latest/
- **Documentation (Ascend NPU)**: https://ascend.github.io/docs/sources/llamafactory/
- **Colab (free)**: https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing
- **Local machine**: Please refer to [usage](#如何使用)
- **PAI-DSW (free trial)**: https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
- **Alaya NeW (cloud computing deal)**: https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory

> [!NOTE]
> Websites other than the links above are unauthorized third-party sites. Please use them with care.
## Table of Contents

- [Features](#项目特色)
- [Benchmark](#性能指标)
- [Official Blog](#官方博客)
- [Changelog](#更新日志)
- [Supported Models](#模型)
- [Supported Training Approaches](#训练方法)
- [Provided Datasets](#数据集)
- [Requirements](#软硬件依赖)
- [Getting Started](#如何使用)
- [Installation](#安装-llama-factory)
- [Data Preparation](#数据准备)
- [Quickstart](#快速开始)
- [Fine-Tuning with LLaMA Board GUI](#llama-board-可视化微调由-gradio-驱动)
- [Build Docker](#构建-docker)
- [Deploy with OpenAI-style API and vLLM](#利用-vllm-部署-openai-api)
- [Download from ModelScope Hub](#从魔搭社区下载)
- [Download from Modelers Hub](#从魔乐社区下载)
- [Use W&B Logger](#使用-wb-面板)
- [Use SwanLab Logger](#使用-swanlab-面板)
- [Projects using LLaMA Factory](#使用了-llama-factory-的项目)
- [License](#协议)
- [Citation](#引用)
@@ -48,39 +90,91 @@ https://github.com/user-attachments/assets/e6ce34b0-52d5-4f3e-a830-592106c4c272

## Features

- **Various models**: LLaMA, LLaVA, Mistral, Mixtral-MoE, Qwen, Qwen2-VL, Yi, Gemma, Baichuan, ChatGLM, Phi, etc.
- **Various models**: LLaMA, LLaVA, Mistral, Mixtral-MoE, Qwen, Qwen2-VL, DeepSeek, Yi, Gemma, ChatGLM, Phi, etc.
- **Integrated methods**: (Continuous) pre-training, (multimodal) supervised fine-tuning, reward modeling, PPO, DPO, KTO, ORPO, etc.
- **Scalable resources**: 16-bit full-parameter fine-tuning, freeze-tuning, LoRA fine-tuning, and 2/3/4/5/6/8-bit QLoRA via AQLM/AWQ/GPTQ/LLM.int8/HQQ/EETQ.
- **Advanced algorithms**: [GaLore](https://github.com/jiaweizzhao/GaLore), [BAdam](https://github.com/Ledzy/BAdam), [Adam-mini](https://github.com/zyushun/Adam-mini), DoRA, LongLoRA, LLaMA Pro, Mixture-of-Depths, LoRA+, LoftQ, PiSSA and Agent tuning.
- **Advanced algorithms**: [GaLore](https://github.com/jiaweizzhao/GaLore), [BAdam](https://github.com/Ledzy/BAdam), [APOLLO](https://github.com/zhuhanqing/APOLLO), [Adam-mini](https://github.com/zyushun/Adam-mini), [Muon](https://github.com/KellerJordan/Muon), DoRA, LongLoRA, LLaMA Pro, Mixture-of-Depths, LoRA+, LoftQ and PiSSA.
- **Practical tricks**: [FlashAttention-2](https://github.com/Dao-AILab/flash-attention), [Unsloth](https://github.com/unslothai/unsloth), [Liger Kernel](https://github.com/linkedin/Liger-Kernel), RoPE scaling, NEFTune and rsLoRA.
- **Experiment monitors**: LlamaBoard, TensorBoard, Wandb, MLflow, etc.
- **Fast inference**: OpenAI-style API, browser UI and CLI based on vLLM.
- **Wide tasks**: Multi-turn dialogue, tool using, image understanding, visual grounding, video recognition, audio understanding, etc.
- **Experiment monitors**: LlamaBoard, TensorBoard, Wandb, MLflow, [SwanLab](https://github.com/SwanHubX/SwanLab), etc.
- **Fast inference**: OpenAI-style API, browser UI and CLI based on [vLLM](https://github.com/vllm-project/vllm) or [SGLang](https://github.com/sgl-project/sglang).
## Benchmark

### Day-N fine-tuning adaptation for the latest models

Compared with ChatGLM's official [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning), the LoRA fine-tuning in LLaMA Factory offers a **3.7x** speedup while achieving a higher Rouge score on the advertising-text generation task. Combined with 4-bit quantization, LLaMA Factory's QLoRA further reduces GPU memory usage.

| Support date | Model name |
| ------------ | -------------------------------------------------------------------- |
| Day 0 | Qwen3 / Qwen2.5-VL / Gemma 3 / GLM-4.1V / InternLM 3 / MiniCPM-o-2.6 |
| Day 1 | Llama 3 / GLM-4 / Mistral Small / PaliGemma2 / Llama 4 |

(figure: benchmark results)

## Official Blog

<details><summary>Definitions</summary>

- [Fine-tune a Llama3.1-70B medical diagnosis model with LLaMA-Factory](https://docs.alayanew.com/docs/documents/bestPractice/bigModel/llama70B/) (Chinese)
- [Building LLM Model Hub, a one-stop code-free platform for LLM reinforcement learning and deployment, based on LLaMA-Factory and EasyR1](https://aws.amazon.com/cn/blogs/china/building-llm-model-hub-based-on-llamafactory-and-easyr1/) (Chinese)
- [Enhancing visual information extraction from banking documents with multimodal models using LLaMA-Factory on Amazon SageMaker HyperPod](https://aws.amazon.com/cn/blogs/machine-learning/how-apoidea-group-enhances-visual-information-extraction-from-banking-documents-with-multimodal-models-using-llama-factory-on-amazon-sagemaker-hyperpod/) (English)
- [Easy Dataset × LLaMA Factory: enabling LLMs to learn domain knowledge efficiently](https://buaa-act.feishu.cn/wiki/KY9xwTGs1iqHrRkjXBwcZP9WnL9) (Chinese)

- **Training Speed**: the number of training samples processed per second. (batch size=4, cutoff length=1024)
- **Rouge Score**: the Rouge-2 score on the validation set of the [advertising-text generation](https://aclanthology.org/D19-1321.pdf) task. (batch size=4, cutoff length=1024)
- **GPU Memory**: the peak GPU memory usage in 4-bit quantized training. (batch size=1, cutoff length=1024)
- We adopt `pre_seq_len=128` for ChatGLM's P-Tuning and `lora_rank=32` for LLaMA Factory's LoRA fine-tuning.
<details><summary>All blogs</summary>

- [Fine-tune Qwen2.5-VL for autonomous driving scenarios with LLaMA-Factory](https://docs.alayanew.com/docs/documents/useGuide/LLaMAFactory/mutiple/?utm_source=LLaMA-Factory) (Chinese)
- [LLaMA Factory: fine-tune the DeepSeek-R1-Distill-Qwen-7B model to build a news-headline classifier](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_deepseek_r1_distill_7b) (Chinese)
- [Building Model Hub, a one-stop code-free model fine-tuning and deployment platform, based on Amazon SageMaker and LLaMA-Factory](https://aws.amazon.com/cn/blogs/china/a-one-stop-code-free-model-fine-tuning-deployment-platform-based-on-sagemaker-and-llama-factory/) (Chinese)
- [Multimodal fine-tuning practice with LLaMA Factory: fine-tune Qwen2-VL to build a culture-and-tourism model](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory_qwen2vl) (Chinese)
- [LLaMA Factory: fine-tune the Llama3 model for role-playing](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory) (Chinese)

</details>
## Changelog

[25/07/02] We supported fine-tuning the **[GLM-4.1V-9B-Thinking](https://github.com/THUDM/GLM-4.1V-Thinking)** model. Please install transformers from the main branch to use it.

[25/04/28] We supported fine-tuning the **[Qwen3](https://qwenlm.github.io/blog/qwen3/)** model family.

[25/04/21] We supported the **[Muon](https://github.com/KellerJordan/Muon)** optimizer. See [examples](examples/README_zh.md) for usage. Thanks to [@tianshijing](https://github.com/tianshijing)'s PR.

[25/04/16] We supported fine-tuning the **[InternVL3](https://huggingface.co/OpenGVLab/InternVL3-8B)** model. See [PR #7258](https://github.com/hiyouga/LLaMA-Factory/pull/7258) to get started.

[25/04/14] We supported fine-tuning the **[GLM-Z1](https://huggingface.co/THUDM/GLM-Z1-9B-0414)** and **[Kimi-VL](https://huggingface.co/moonshotai/Kimi-VL-A3B-Instruct)** models.

[25/04/06] We supported fine-tuning the **[Llama 4](https://ai.meta.com/blog/llama-4-multimodal-intelligence/)** model. See [PR #7611](https://github.com/hiyouga/LLaMA-Factory/pull/7611) to get started.

<details><summary>Full changelog</summary>

[25/03/31] We supported fine-tuning the **[Qwen2.5 Omni](https://qwenlm.github.io/blog/qwen2.5-omni/)** model. See [PR #7537](https://github.com/hiyouga/LLaMA-Factory/pull/7537) to get started.

[25/03/15] We supported the **[SGLang](https://github.com/sgl-project/sglang)** inference backend. Use `infer_backend: sglang` to enable it.

[25/03/12] We supported fine-tuning the **[Gemma 3](https://huggingface.co/blog/gemma3)** model.

[25/02/24] We announced the open-sourcing of **[EasyR1](https://github.com/hiyouga/EasyR1)**, an efficient and scalable multimodal reinforcement-learning framework that supports efficient GRPO training.

[25/02/11] We supported saving **[Ollama](https://github.com/ollama/ollama)** modelfiles when exporting models. See [examples](examples/README_zh.md) for usage.

[25/02/05] We supported fine-tuning **[Qwen2-Audio](Qwen/Qwen2-Audio-7B-Instruct)** and **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** on audio understanding tasks.

[25/01/31] We supported fine-tuning the **[DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1)** and **[Qwen2.5-VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct)** models.

[25/01/15] We supported the **[APOLLO](https://arxiv.org/abs/2412.05270)** optimizer. See [examples](examples/README_zh.md) for usage.

[25/01/14] We supported fine-tuning the **[MiniCPM-o-2.6](https://huggingface.co/openbmb/MiniCPM-o-2_6)** and **[MiniCPM-V-2.6](https://huggingface.co/openbmb/MiniCPM-V-2_6)** models. Thanks to [@BUAADreamer](https://github.com/BUAADreamer)'s PR.

[25/01/14] We supported fine-tuning the **[InternLM 3](https://huggingface.co/collections/internlm/)** model. Thanks to [@hhaAndroid](https://github.com/hhaAndroid)'s PR.

[25/01/10] We supported fine-tuning the **[Phi-4](https://huggingface.co/microsoft/phi-4)** model.

[24/12/21] We supported experiment tracking and visualization with **[SwanLab](https://github.com/SwanHubX/SwanLab)**. See [this section](#使用-swanlab-面板) for details.

[24/11/27] We supported fine-tuning the **[Skywork-o1](https://huggingface.co/Skywork/Skywork-o1-Open-Llama-3.1-8B)** model and the **[OpenO1](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)** dataset.

[24/10/09] We supported downloading pre-trained models and datasets from the **[Modelers Hub](https://modelers.cn/models)**. See [this tutorial](#从魔乐社区下载) for usage.

[24/09/19] We supported fine-tuning the **[Qwen2.5](https://qwenlm.github.io/blog/qwen2.5/)** model.

[24/08/30] We supported fine-tuning the **[Qwen2-VL](https://qwenlm.github.io/blog/qwen2-vl/)** model. Thanks to [@simonJJJ](https://github.com/simonJJJ)'s PR.

[24/08/27] We supported the **[Liger Kernel](https://github.com/linkedin/Liger-Kernel)**. Use `enable_liger_kernel: true` to accelerate training.

[24/08/09] We supported the **[Adam-mini](https://github.com/zyushun/Adam-mini)** optimizer. See [examples](examples/README_zh.md) for usage. Thanks to [@relic-yuexi](https://github.com/relic-yuexi)'s PR.

<details><summary>Full changelog</summary>

[24/07/04] We supported [contamination-free packed training](https://github.com/MeetKai/functionary/tree/main/functionary/train/packing). Use `neat_packing: true` to enable it. Thanks to [@chuan298](https://github.com/chuan298)'s PR.

[24/06/16] We supported the **[PiSSA](https://arxiv.org/abs/2404.02948)** algorithm. See [examples](examples/README_zh.md) for usage.
@@ -159,41 +253,78 @@ https://github.com/user-attachments/assets/e6ce34b0-52d5-4f3e-a830-592106c4c272

</details>

> [!TIP]
> If you cannot use the latest features, please try pulling the latest code and reinstalling LLaMA-Factory.

## Models
| Model | Model size | Template |
| ----------------------------------------------------------------- | -------------------------------- | --------- |
| [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
| [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
| [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma |
| [GLM-4](https://huggingface.co/THUDM) | 9B | glm4 |
| [InternLM2/InternLM2.5](https://huggingface.co/internlm) | 7B/20B | intern2 |
| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
| [Llama 3/Llama 3.1](https://huggingface.co/meta-llama) | 8B/70B | llama3 |
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
| [MiniCPM](https://huggingface.co/openbmb) | 1B/2B/4B | cpm/cpm3 |
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
| [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
| [PaliGemma](https://huggingface.co/google) | 3B | paligemma |
| [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
| [Phi-3](https://huggingface.co/microsoft) | 4B/7B/14B | phi |
| [Qwen/Qwen1.5/Qwen2 (Code/Math/MoE)](https://huggingface.co/Qwen) | 0.5B/1.5B/4B/7B/14B/32B/72B/110B | qwen |
| [Qwen2-VL](https://huggingface.co/Qwen) | 2B/7B | qwen2_vl |
| [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
| [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi |
| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |

| Model | Model size | Template |
| ----------------------------------------------------------------- | -------------------------------- | ------------------- |
| [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
| [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
| [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
| [DeepSeek 2.5/3](https://huggingface.co/deepseek-ai) | 236B/671B | deepseek3 |
| [DeepSeek R1 (Distill)](https://huggingface.co/deepseek-ai) | 1.5B/7B/8B/14B/32B/70B/671B | deepseekr1 |
| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
| [Falcon-H1](https://huggingface.co/tiiuae) | 0.5B/1.5B/3B/7B/34B | falcon_h1 |
| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma/gemma2 |
| [Gemma 3/Gemma 3n](https://huggingface.co/google) | 1B/4B/6B/8B/12B/27B | gemma3/gemma3n |
| [GLM-4/GLM-4-0414/GLM-Z1](https://huggingface.co/zai-org) | 9B/32B | glm4/glmz1 |
| [GLM-4.1V](https://huggingface.co/zai-org)* | 9B | glm4v |
| [GLM-4.5](https://huggingface.co/zai-org)* | 106B/355B | glm4_moe |
| [GPT-2](https://huggingface.co/openai-community) | 0.1B/0.4B/0.8B/1.5B | - |
| [Granite 3.0-3.3](https://huggingface.co/ibm-granite) | 1B/2B/3B/8B | granite3 |
| [Granite 4](https://huggingface.co/ibm-granite) | 7B | granite4 |
| [Hunyuan](https://huggingface.co/tencent/) | 7B | hunyuan |
| [Index](https://huggingface.co/IndexTeam) | 1.9B | index |
| [InternLM 2-3](https://huggingface.co/internlm) | 7B/8B/20B | intern2 |
| [InternVL 2.5-3](https://huggingface.co/OpenGVLab) | 1B/2B/8B/14B/38B/78B | intern_vl |
| [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl |
| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
| [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
| [Llama 4](https://huggingface.co/meta-llama) | 109B/402B | llama4 |
| [Llama 3.2 Vision](https://huggingface.co/meta-llama) | 11B/90B | mllama |
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
| [LLaVA-NeXT](https://huggingface.co/llava-hf) | 7B/8B/13B/34B/72B/110B | llava_next |
| [LLaVA-NeXT-Video](https://huggingface.co/llava-hf) | 7B/34B | llava_next_video |
| [MiMo](https://huggingface.co/XiaomiMiMo) | 7B | mimo |
| [MiniCPM](https://huggingface.co/openbmb) | 0.5B/1B/2B/4B/8B | cpm/cpm3/cpm4 |
| [MiniCPM-o-2.6/MiniCPM-V-2.6](https://huggingface.co/openbmb) | 8B | minicpm_o/minicpm_v |
| [Ministral/Mistral-Nemo](https://huggingface.co/mistralai) | 8B/12B | ministral |
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
| [Mistral Small](https://huggingface.co/mistralai) | 24B | mistral_small |
| [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
| [PaliGemma/PaliGemma2](https://huggingface.co/google) | 3B/10B/28B | paligemma |
| [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
| [Phi-3/Phi-3.5](https://huggingface.co/microsoft) | 4B/14B | phi |
| [Phi-3-small](https://huggingface.co/microsoft) | 7B | phi_small |
| [Phi-4](https://huggingface.co/microsoft) | 14B | phi4 |
| [Pixtral](https://huggingface.co/mistralai) | 12B | pixtral |
| [Qwen (1-2.5) (Code/Math/MoE/QwQ)](https://huggingface.co/Qwen) | 0.5B/1.5B/3B/7B/14B/32B/72B/110B | qwen |
| [Qwen3 (MoE)](https://huggingface.co/Qwen) | 0.6B/1.7B/4B/8B/14B/32B/235B | qwen3 |
| [Qwen2-Audio](https://huggingface.co/Qwen) | 7B | qwen2_audio |
| [Qwen2.5-Omni](https://huggingface.co/Qwen) | 3B/7B | qwen2_omni |
| [Qwen2-VL/Qwen2.5-VL/QVQ](https://huggingface.co/Qwen) | 2B/3B/7B/32B/72B | qwen2_vl |
| [Seed Coder](https://huggingface.co/ByteDance-Seed) | 8B | seed_coder |
| [Skywork o1](https://huggingface.co/Skywork) | 8B | skywork_o1 |
| [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
| [TeleChat2](https://huggingface.co/Tele-AI) | 3B/7B/35B/115B | telechat2 |
| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
| [Yi/Yi-1.5 (Code)](https://huggingface.co/01-ai) | 1.5B/6B/9B/34B | yi |
| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |
> [!NOTE]
> For the "base" models, the `template` argument can be chosen from `default`, `alpaca`, `vicuna`, etc. But make sure to use the **corresponding template** for the "instruct/chat" models.
>
> Remember to use the **SAME** template in training and inference.
>
> \*: You should install `transformers` from the main branch and use `DISABLE_VERSION_CHECK=1` to skip the version check.
>
> \*\*: You need to install a specific version of `transformers` to use the corresponding model.

Please refer to [constants.py](src/llamafactory/extras/constants.py) for a full list of supported models.
@@ -202,7 +333,7 @@ https://github.com/user-attachments/assets/e6ce34b0-52d5-4f3e-a830-592106c4c272

## Supported Training Approaches

| Approach | Full-tuning | Freeze-tuning | LoRA | QLoRA |
| ---------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
| Pre-Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| Supervised Fine-Tuning | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| Reward Modeling | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
@@ -272,9 +403,13 @@ https://github.com/user-attachments/assets/e6ce34b0-52d5-4f3e-a830-592106c4c272

- [STEM (zh)](https://huggingface.co/datasets/hfl/stem_zh_instruction)
- [Ruozhiba (zh)](https://huggingface.co/datasets/hfl/ruozhiba_gpt4_turbo)
- [Neo-sft (zh)](https://huggingface.co/datasets/m-a-p/neo_sft_phase2)
- [WebInstructSub (en)](https://huggingface.co/datasets/TIGER-Lab/WebInstructSub)
- [Magpie-Pro-300K-Filtered (en)](https://huggingface.co/datasets/Magpie-Align/Magpie-Pro-300K-Filtered)
- [Magpie-ultra-v0.1 (en)](https://huggingface.co/datasets/argilla/magpie-ultra-v0.1)
- [WebInstructSub (en)](https://huggingface.co/datasets/TIGER-Lab/WebInstructSub)
- [OpenO1-SFT (en&zh)](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)
- [Open-Thoughts (en)](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k)
- [Open-R1-Math (en)](https://huggingface.co/datasets/open-r1/OpenR1-Math-220k)
- [Chinese-DeepSeek-R1-Distill (zh)](https://huggingface.co/datasets/Congliu/Chinese-DeepSeek-R1-Distill-data-110k-SFT)
- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k)
- [Pokemon-gpt4o-captions (en&zh)](https://huggingface.co/datasets/jugg1024/pokemon-gpt4o-captions)
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
@@ -293,8 +428,10 @@ https://github.com/user-attachments/assets/e6ce34b0-52d5-4f3e-a830-592106c4c272

- [DPO mixed (en&zh)](https://huggingface.co/datasets/hiyouga/DPO-En-Zh-20k)
- [UltraFeedback (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized)
- [COIG-P (zh)](https://huggingface.co/datasets/m-a-p/COIG-P)
- [RLHF-V (en)](https://huggingface.co/datasets/openbmb/RLHF-V-Dataset)
- [VLFeedback (en)](https://huggingface.co/datasets/Zhihui/VLFeedback)
- [RLAIF-V (en)](https://huggingface.co/datasets/openbmb/RLAIF-V-Dataset)
- [Orca DPO Pairs (en)](https://huggingface.co/datasets/Intel/orca_dpo_pairs)
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
@@ -312,37 +449,37 @@ huggingface-cli login

## Requirements

| Mandatory | Minimum | Recommend |
| ------------ | ------- | --------- |
| python | 3.8 | 3.11 |
| torch | 1.13.1 | 2.4.0 |
| transformers | 4.41.2 | 4.43.4 |
| datasets | 2.16.0 | 2.20.0 |
| accelerate | 0.30.1 | 0.32.0 |
| peft | 0.11.1 | 0.12.0 |
| python | 3.9 | 3.10 |
| torch | 2.0.0 | 2.6.0 |
| torchvision | 0.15.0 | 0.21.0 |
| transformers | 4.49.0 | 4.50.0 |
| datasets | 2.16.0 | 3.2.0 |
| accelerate | 0.34.0 | 1.2.1 |
| peft | 0.14.0 | 0.15.1 |
| trl | 0.8.6 | 0.9.6 |

| Optional | Minimum | Recommend |
| ------------ | ------- | --------- |
| CUDA | 11.6 | 12.2 |
| deepspeed | 0.10.0 | 0.14.0 |
| deepspeed | 0.10.0 | 0.16.4 |
| bitsandbytes | 0.39.0 | 0.43.1 |
| vllm | 0.4.3 | 0.5.0 |
| flash-attn | 2.3.0 | 2.6.3 |
| vllm | 0.4.3 | 0.8.2 |
| flash-attn | 2.5.6 | 2.7.2 |
### Hardware Requirement

\* *estimated*

| Method | Bits | 7B | 13B | 30B | 70B | 110B | 8x7B | 8x22B |
| ----------------- | ---- | ----- | ----- | ----- | ------ | ------ | ----- | ------ |
| Full | AMP | 120GB | 240GB | 600GB | 1200GB | 2000GB | 900GB | 2400GB |
| Full | 16 | 60GB | 120GB | 300GB | 600GB | 900GB | 400GB | 1200GB |
| Freeze | 16 | 20GB | 40GB | 80GB | 200GB | 360GB | 160GB | 400GB |
| LoRA/GaLore/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | 240GB | 120GB | 320GB |
| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | 140GB | 60GB | 160GB |
| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | 72GB | 30GB | 96GB |
| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | 48GB | 18GB | 48GB |

| Method | Bits | 7B | 14B | 30B | 70B | `x`B |
| ------------------------------- | ---- | ----- | ----- | ----- | ------ | ------- |
| Full (`bf16` or `fp16`) | 32 | 120GB | 240GB | 600GB | 1200GB | `18x`GB |
| Full (`pure_bf16`) | 16 | 60GB | 120GB | 300GB | 600GB | `8x`GB |
| Freeze/LoRA/GaLore/APOLLO/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | `2x`GB |
| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | `x`GB |
| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | `x/2`GB |
| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | `x/4`GB |
## Getting Started

@@ -351,32 +488,77 @@ huggingface-cli login

> [!IMPORTANT]
> This step is required.

#### Install from Source

```bash
git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git
cd LLaMA-Factory
pip install -e ".[torch,metrics]" --no-build-isolation
```

Extra dependencies available: torch, torch-npu, metrics, deepspeed, liger-kernel, bitsandbytes, hqq, eetq, gptq, aqlm, vllm, sglang, galore, apollo, badam, adam-mini, qwen, minicpm_v, openmind, swanlab, dev

> [!TIP]
> Use `pip install --no-deps -e .` to resolve package conflicts.
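As a concrete illustration, a minimal sketch of combining several of the extras above; the particular selection here is an assumption and should be adjusted to your hardware:

```bash
# hypothetical selection of extras; pick the ones your setup actually needs
pip install -e ".[torch,metrics,deepspeed,bitsandbytes]" --no-build-isolation
```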
#### Install from Docker Image

```bash
docker run -it --rm --gpus=all --ipc=host hiyouga/llamafactory:latest
```

This image is built on Ubuntu 22.04 (x86\_64) with CUDA 12.4, Python 3.11, PyTorch 2.6.0 and flash-attn 2.7.4.

See all available images at: https://hub.docker.com/r/hiyouga/llamafactory/tags

Please refer to [Build Docker](#构建-docker) to rebuild the image yourself.
<details><summary>Set up a virtual environment with <b>uv</b></summary>

Create an isolated Python environment with [uv](https://github.com/astral-sh/uv):

```bash
uv sync --extra torch --extra metrics --prerelease=allow
```

Run LLaMA-Factory inside the environment:

```bash
uv run --prerelease=allow llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml
```

</details>
<details><summary>Guide for Windows users</summary>

#### Install PyTorch

On Windows you need to install the GPU build of PyTorch manually. Refer to the [official website](https://pytorch.org/get-started/locally/) and the following commands to install PyTorch and verify the installation:

```bash
pip uninstall torch torchvision torchaudio
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu126
python -c "import torch; print(torch.cuda.is_available())"
```

If it prints `True`, the installation succeeded.

If you hit an error like `Can't pickle local object`, set `dataloader_num_workers: 0`.
#### Install BitsAndBytes

To enable quantized LoRA (QLoRA) on Windows, install a pre-built `bitsandbytes` wheel, which supports CUDA 11.1 to 12.2. Choose the [release](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels) matching your CUDA version.

```bash
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.2.post2-py3-none-win_amd64.whl
```

#### Install Flash Attention-2

To enable FlashAttention-2 on Windows, use the scripts in [flash-attention-windows-wheel](https://huggingface.co/lldacing/flash-attention-windows-wheel) to compile and install it yourself.

</details>
<details><summary>Guide for Ascend NPU users</summary>

To install LLaMA Factory on Ascend NPU devices, upgrade Python to 3.10 or above and specify the extra dependencies: `pip install -e ".[torch-npu,metrics]"`. You also need to install the **[Ascend CANN Toolkit and Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**; follow the [installation tutorial](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC2alpha002/quickstart/quickstart/quickstart_18_0004.html) or use the following commands:

```bash
# replace the URL according to your CANN version and device model
@@ -392,12 +574,13 @@ bash Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run --install

source /usr/local/Ascend/ascend-toolkit/set_env.sh
```
| Requirement | Minimum | Recommended    |
| ----------- | ------- | -------------- |
| CANN        | 8.0.RC1 | 8.0.0.alpha002 |
| torch       | 2.1.0   | 2.4.0          |
| torch-npu   | 2.1.0   | 2.4.0.post2    |
| deepspeed   | 0.13.2  | 0.13.2         |
| vllm-ascend | -       | 0.7.3          |

Remember to use `ASCEND_RT_VISIBLE_DEVICES` instead of `CUDA_VISIBLE_DEVICES` to specify the devices, as shown in the sketch below.
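A minimal sketch of launching a run on specific NPUs; the example config path is an assumption borrowed from the repository's `examples` directory:

```bash
# select NPU devices 0 and 1 (config path assumed)
ASCEND_RT_VISIBLE_DEVICES=0,1 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
```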
@@ -405,15 +588,51 @@ source /usr/local/Ascend/ascend-toolkit/set_env.sh

Download the pre-built Docker images: [32GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/130.html) | [64GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/131.html)

#### Install BitsAndBytes

To run bitsandbytes-based QLoRA fine-tuning on Ascend NPU, follow these steps:

1. Compile bitsandbytes manually: follow the [installation docs](https://huggingface.co/docs/bitsandbytes/installation?backend=Ascend+NPU&platform=Ascend+NPU) to build the NPU version of bitsandbytes. Compilation requires cmake >= 3.22.1 and g++ >= 12.x.
```bash
# install bitsandbytes from source
# clone the bitsandbytes repo; Ascend NPU support currently lives in the multi-backend-refactor branch
git clone -b multi-backend-refactor https://github.com/bitsandbytes-foundation/bitsandbytes.git
cd bitsandbytes/

# install dependencies
pip install -r requirements-dev.txt

# install the build tools; the exact command differs across systems, the following is for reference
apt-get install -y build-essential cmake

# build & install
cmake -DCOMPUTE_BACKEND=npu -S .
make
pip install .
```
2. Install the main branch of transformers.

```bash
git clone -b main https://github.com/huggingface/transformers.git
cd transformers
pip install .
```

3. Set `double_quantization: false` in the training arguments; see this [example](examples/train_qlora/llama3_lora_sft_bnb_npu.yaml).

</details>
### Data Preparation

Please refer to [data/README_zh.md](data/README_zh.md) for the dataset file format. You can use datasets from the HuggingFace / ModelScope / Modelers hubs, or load a local dataset.

> [!NOTE]
> Please update `data/dataset_info.json` when using a custom dataset; one possible way to do this is sketched below.

You can also use **[Easy Dataset](https://github.com/ConardLi/easy-dataset)**, **[DataFlow](https://github.com/OpenDCAI/DataFlow)** and **[GraphGen](https://github.com/open-sciencelab/GraphGen)** to build synthetic data for fine-tuning.
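A rough sketch of registering a custom dataset with `jq`; the dataset name and file below are hypothetical, and editing the file by hand works just as well:

```bash
# register a local alpaca-format file "my_dataset.json" under the name "my_dataset" (both hypothetical)
jq '. + {"my_dataset": {"file_name": "my_dataset.json"}}' data/dataset_info.json > tmp.json && mv tmp.json data/dataset_info.json
```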
### Quickstart

The following three commands run LoRA **fine-tuning**, **inference** and **merging** for the Llama3-8B-Instruct model, respectively; a sketch of the commands follows below.

@@ -428,6 +647,8 @@ llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml

> [!TIP]
> Use `llamafactory-cli help` to show the help information.
>
> Please read the [FAQ](https://github.com/hiyouga/LLaMA-Factory/issues/4614) first if you run into any problems.
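The diff above elides the first two commands; as a hedged sketch, they plausibly follow the same pattern as the merge command shown in the hunk header, with the example config paths being assumptions:

```bash
# fine-tune, chat and merge with the bundled example configs (paths assumed)
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
```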
### Fine-Tuning with LLaMA Board GUI (powered by [Gradio](https://github.com/gradio-app/gradio))

@@ -467,21 +688,13 @@ CUDA users:
```bash
docker build -f ./docker/docker-cuda/Dockerfile \
    --build-arg PIP_INDEX=https://pypi.org/simple \
    --build-arg EXTRAS=metrics \
    -t llamafactory:latest .

docker run -dit --ipc=host --gpus=all \
    -p 7860:7860 \
    -p 8000:8000 \
    --shm-size 16G \
    --name llamafactory \
    llamafactory:latest
```

@@ -491,18 +704,12 @@ docker exec -it llamafactory bash
Ascend NPU users:

```bash
# choose the docker image according to your environment
docker build -f ./docker/docker-npu/Dockerfile \
    --build-arg PIP_INDEX=https://pypi.org/simple \
    --build-arg EXTRAS=torch-npu,metrics \
    -t llamafactory:latest .

# change `device` according to your resources
docker run -dit --ipc=host \
    -v /usr/local/dcmi:/usr/local/dcmi \
    -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
    -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
@@ -513,7 +720,6 @@ docker run -dit \
    --device /dev/davinci_manager \
    --device /dev/devmm_svm \
    --device /dev/hisi_hdc \
    --shm-size 16G \
    --name llamafactory \
    llamafactory:latest
```
@@ -524,24 +730,15 @@ AMD ROCm users:

```bash
docker build -f ./docker/docker-rocm/Dockerfile \
    --build-arg PIP_INDEX=https://pypi.org/simple \
    --build-arg EXTRAS=metrics \
    -t llamafactory:latest .

docker run -dit --ipc=host \
    -p 7860:7860 \
    -p 8000:8000 \
    --device /dev/kfd \
    --device /dev/dri \
    --shm-size 16G \
    --name llamafactory \
    llamafactory:latest
```
@@ -550,11 +747,14 @@ docker exec -it llamafactory bash

</details>

<details><summary>Use Docker volumes</summary>

You can enable data volumes by uncommenting `VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]` in the Dockerfile.

When starting the container, use flags such as `-v ./hf_cache:/root/.cache/huggingface` to mount the data volumes. The meaning of each volume is as follows.

- `hf_cache`: Use the Hugging Face cache on the host machine.
- `shared_data`: The directory on the host machine where datasets are stored.
- `output`: Set the export directory to this path so that the merged model can be accessed on the host machine.

</details>
@@ -562,11 +762,13 @@ docker exec -it llamafactory bash

### Deploy an OpenAI-style API with vLLM

```bash
API_PORT=8000 llamafactory-cli api examples/inference/llama3.yaml infer_backend=vllm vllm_enforce_eager=true
```

> [!TIP]
> See [this page](https://platform.openai.com/docs/api-reference/chat/create) for the API documentation.
>
> Examples: [image understanding](scripts/api_example/test_image.py) | [tool calling](scripts/api_example/test_toolcall.py)
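Once the server is up, any OpenAI-style client can talk to it. A minimal sketch with `curl`, assuming the default port and a placeholder model name:

```bash
# hypothetical request against the local endpoint; the model name is an assumption
curl http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "llama3", "messages": [{"role": "user", "content": "Hello!"}]}'
```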
### Download from ModelScope Hub

@@ -578,6 +780,16 @@ export USE_MODELSCOPE_HUB=1 # use `set USE_MODELSCOPE_HUB=1` on Windows

Set `model_name_or_path` to the model ID to load the corresponding model. Browse all available models on the [ModelScope Hub](https://modelscope.cn/models), e.g. `LLM-Research/Meta-Llama-3-8B-Instruct`, as sketched below.
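A rough sketch of pulling a model from ModelScope at training time; the example config path and the CLI override are assumptions:

```bash
export USE_MODELSCOPE_HUB=1
# hypothetical override of the model ID on an example config
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml model_name_or_path=LLM-Research/Meta-Llama-3-8B-Instruct
```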
### Download from Modelers Hub

You can also download datasets and models from the Modelers Hub as follows.

```bash
export USE_OPENMIND_HUB=1 # use `set USE_OPENMIND_HUB=1` on Windows
```

Set `model_name_or_path` to the model ID to load the corresponding model. Browse all available models on the [Modelers Hub](https://modelers.cn/models), e.g. `TeleAI/TeleChat-7B-pt`.
### Use the W&B Logger

To log experiment results with [Weights & Biases](https://wandb.ai), add the following arguments to the yaml file.

@@ -589,6 +801,21 @@ run_name: test_run # optional

Set `WANDB_API_KEY` to [your key](https://wandb.ai/authorize) when launching the training task to log in to your W&B account, as sketched below.
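A minimal sketch of a launch with W&B logging enabled; the config path is an assumption:

```bash
# pass your W&B key for this run (config path assumed)
WANDB_API_KEY=your_api_key llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
```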
### Use the SwanLab Logger

To log experiment results with [SwanLab](https://github.com/SwanHubX/SwanLab), add the following arguments to the yaml file.

```yaml
use_swanlab: true
swanlab_run_name: test_run # optional
```

When launching the training task, there are three ways to log in to your SwanLab account (see the sketch after this list):

1. Add `swanlab_api_key=<your_api_key>` to the yaml file and set it to your [API key](https://swanlab.cn/settings).
2. Set the environment variable `SWANLAB_API_KEY` to your [API key](https://swanlab.cn/settings).
3. Run `swanlab login` before launching to complete the login.
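For instance, option 2 might look like the following; the config path is an assumption:

```bash
# option 2: authenticate via an environment variable (config path assumed)
export SWANLAB_API_KEY=your_api_key
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
```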
## Projects Using LLaMA Factory

If you have a project that should be added to the list below, please reach out via email or create a PR.

@@ -676,16 +903,21 @@ run_name: test_run # optional
1. Zeng et al. Perceive, Reflect, and Plan: Designing LLM Agent for Goal-Directed City Navigation without Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2408.04168)
1. Xia et al. Using Pre-trained Language Model for Accurate ESG Prediction. FinNLP 2024. [[paper]](https://aclanthology.org/2024.finnlp-2.1/)
1. Liang et al. I-SHEEP: Self-Alignment of LLM from Scratch through an Iterative Self-Enhancement Paradigm. 2024. [[arxiv]](https://arxiv.org/abs/2408.08072)
1. Bai et al. Aligning Large Language Model with Direct Multi-Preference Optimization for Recommendation. CIKM 2024. [[paper]](https://dl.acm.org/doi/10.1145/3627673.3679611)
1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: A large language model for astronomy, fine-tuned from ChatGLM2-6B and Qwen-14B on astronomical data.
1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: A large language model for the Chinese legal domain, fine-tuned from Baichuan-13B, capable of legal reasoning and knowledge retrieval.
1. **[Sunsimiao](https://github.com/X-D-Lab/Sunsimiao)**: Sunsimiao, a Chinese medical large language model fine-tuned from Baichuan-7B and ChatGLM-6B on Chinese medical data.
1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: A medical LLM project fine-tuned from LLaMA2-7B and Baichuan-13B on Chinese medical data.
1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**: A series of MBTI-personality large language models, giving any LLM one of 16 personality types via tailored datasets and training methods.
1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**: A large language model for generating Stable Diffusion prompts. [[demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt)
1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**: A Chinese multimodal medical model, fine-tuned from LLaVA-1.5-7B on Chinese multimodal medical data.
1. **[AutoRE](https://github.com/THUDM/AutoRE)**: A document-level relation extraction system based on large language models.
1. **[NVIDIA RTX AI Toolkit](https://github.com/NVIDIA/RTX-AI-Toolkit)**: An SDK for fine-tuning large language models on Windows PCs with NVIDIA RTX devices.
1. **[LazyLLM](https://github.com/LazyAGI/LazyLLM)**: A low-code tool for building multi-agent LLM applications, supporting model fine-tuning based on LLaMA Factory.
1. **[RAG-Retrieval](https://github.com/NLPJCL/RAG-Retrieval)**: A full-pipeline codebase for fine-tuning, inference and distillation of RAG retrieval models. [[blog]](https://zhuanlan.zhihu.com/p/987727357)
1. **[360-LLaMA-Factory](https://github.com/Qihoo360/360-LLaMA-Factory)**: A modified codebase that supports long-sequence SFT and DPO training via Ring Attention.
1. **[Sky-T1](https://novasky-ai.github.io/posts/sky-t1/)**: A low-cost o1-like long-reasoning model fine-tuned by NovaSky AI.
1. **[WeClone](https://github.com/xming521/WeClone)**: A one-stop solution for creating a digital avatar from chat history.

</details>
@@ -693,7 +925,7 @@ run_name: test_run # optional

The code in this repository is open-sourced under the [Apache-2.0](LICENSE) License.

Please follow the corresponding model licenses when using the model weights: [Baichuan 2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [GLM-4](https://huggingface.co/THUDM/glm-4-9b/blob/main/LICENSE) / [GPT-2](https://github.com/openai/gpt-2/blob/master/LICENSE) / [Granite](LICENSE) / [Index](https://huggingface.co/IndexTeam/Index-1.9B/blob/main/LICENSE) / [InternLM](https://github.com/InternLM/InternLM#license) / [Llama](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [Llama 2](https://ai.meta.com/llama/license/) / [Llama 3](https://llama.meta.com/llama3/license/) / [Llama 4](https://github.com/meta-llama/llama-models/blob/main/models/llama4/LICENSE) / [MiniCPM](https://github.com/OpenBMB/MiniCPM/blob/main/MiniCPM%20Model%20License.md) / [Mistral/Mixtral/Pixtral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/Phi-2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3/Phi-4](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [Skywork](https://huggingface.co/Skywork/Skywork-13B-base/blob/main/Skywork%20Community%20License.pdf) / [StarCoder 2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [TeleChat2](https://huggingface.co/Tele-AI/telechat-7B/blob/main/TeleChat%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yi-1.5](LICENSE) / [Yuan 2](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)

## Citation
Changed assets: assets/alaya_new.svg (new file, 47 KiB) · assets/benchmark.svg (updated, 145 KiB → 166 KiB) · assets/wechat_alaya.png (new file, 209 KiB) · one image updated (151 KiB → 171 KiB).
@@ -1,12 +1,15 @@

The [dataset_info.json](dataset_info.json) contains all available datasets. If you are using a custom dataset, please **make sure** to add a *dataset description* in `dataset_info.json` and specify `dataset: dataset_name` before training to use it.

The `dataset_info.json` file should be put in the `dataset_dir` directory. You can change `dataset_dir` to use another directory. The default value is `./data`.

Currently we support datasets in **alpaca** and **sharegpt** format. Allowed file types include json, jsonl, csv, parquet and arrow.

```json
"dataset_name": {
  "hf_hub_url": "the name of the dataset repository on the Hugging Face hub. (if specified, ignore script_url, file_name and cloud_file_name)",
  "ms_hub_url": "the name of the dataset repository on the Model Scope hub. (if specified, ignore script_url, file_name and cloud_file_name)",
  "script_url": "the name of the directory containing a dataset loading script. (if specified, ignore file_name and cloud_file_name)",
  "cloud_file_name": "the name of the dataset file in s3/gcs cloud storage. (if specified, ignore file_name)",
  "file_name": "the name of the dataset folder or dataset file in this directory. (required if above are not specified)",
  "formatting": "the format of the dataset. (optional, default: alpaca, can be chosen from {alpaca, sharegpt})",
  "ranking": "whether the dataset is a preference dataset or not. (default: False)",
@@ -24,6 +27,7 @@ Currently we support datasets in **alpaca** and **sharegpt** format.
  "tools": "the column name in the dataset containing the tool description. (default: None)",
  "images": "the column name in the dataset containing the image inputs. (default: None)",
  "videos": "the column name in the dataset containing the video inputs. (default: None)",
  "audios": "the column name in the dataset containing the audio inputs. (default: None)",
  "chosen": "the column name in the dataset containing the chosen answers. (default: None)",
  "rejected": "the column name in the dataset containing the rejected answers. (default: None)",
  "kto_tag": "the column name in the dataset containing the kto tags. (default: None)"
}
```

@@ -46,7 +50,9 @@
* [Example dataset](alpaca_en_demo.json)

In supervised fine-tuning, the `instruction` column will be concatenated with the `input` column and used as the user prompt, so the user prompt will be `instruction\ninput`. The `output` column represents the model response.

For reasoning models, if the dataset contains chain-of-thought (CoT), the CoT needs to be placed in the model responses, such as `<think>cot</think>output`.

The `system` column will be used as the system prompt if specified.
@@ -55,13 +61,13 @@ The `history` column is a list consisting of string tuples representing prompt-response pairs.

```json
[
  {
    "instruction": "user instruction (required)",
    "input": "user input (optional)",
    "output": "model response (required)",
    "system": "system prompt (optional)",
    "history": [
      ["user instruction in the first round (optional)", "model response in the first round (optional)"],
      ["user instruction in the second round (optional)", "model response in the second round (optional)"]
    ]
  }
]
```
@@ -82,9 +88,14 @@ Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:

}
```

> [!TIP]
> If the model has reasoning capabilities (e.g. Qwen3) but the dataset does not contain chain-of-thought (CoT), LLaMA-Factory will automatically add an empty CoT to the data. When `enable_thinking` is `True` (slow thinking, the default), the empty CoT is added to the model responses and included in the loss computation; otherwise (fast thinking), it is added to the user prompts and excluded from the loss computation. Please keep the `enable_thinking` parameter consistent between training and inference.
>
> If you want to train data containing CoT with slow thinking and data without CoT with fast thinking, you can set `enable_thinking` to `None`. However, this feature is relatively complicated and should be used with caution.
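For instance, pinning the parameter at both stages might look like the following sketch; the config paths and the key=value CLI override are assumptions:

```bash
# keep enable_thinking consistent across training and inference (paths assumed)
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml enable_thinking=true
llamafactory-cli chat examples/inference/llama3_lora_sft.yaml enable_thinking=true
```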
### Pre-training Dataset

- [Example dataset](c4_demo.jsonl)

In pre-training, only the `text` column will be used for model learning.
@@ -115,8 +126,8 @@ It requires a better response in the `chosen` column and a worse response in the `rejected` column.

```json
[
  {
    "instruction": "user instruction (required)",
    "input": "user input (optional)",
    "chosen": "chosen answer (required)",
    "rejected": "rejected answer (required)"
  }
]
```
@@ -150,6 +161,10 @@ An additional column `images` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.

An additional column `videos` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.

### Multimodal Audio Dataset

An additional column `audios` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.

## Sharegpt Format
### Supervised Fine-Tuning Dataset

@@ -158,7 +173,7 @@ An additional column `videos` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.

Compared to the alpaca format, the sharegpt format allows the datasets to have **more roles**, such as human, gpt, observation and function. They are presented as a list of objects in the `conversations` column.

Note that the human and observation should appear in odd positions, while gpt and function should appear in even positions. The gpt and function turns will be learned by the model.
```json
[
@@ -166,7 +181,7 @@
  {
    "conversations": [
      {
        "from": "human",
        "value": "user instruction"
      },
      {
        "from": "function_call",
@@ -217,7 +232,7 @@ Preference datasets in sharegpt format also require a better message in `chosen` and a worse message in `rejected`.

    "conversations": [
      {
        "from": "human",
        "value": "user instruction"
      },
      {
        "from": "gpt",
@@ -225,7 +240,7 @@
      },
      {
        "from": "human",
        "value": "user instruction"
      }
    ],
    "chosen": {
@@ -267,7 +282,7 @@ KTO datasets require an extra `kto_tag` column containing the boolean human feedback.

    "conversations": [
      {
        "from": "human",
        "value": "user instruction"
      },
      {
        "from": "gpt",
@@ -296,7 +311,7 @@ Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:

- [Example dataset](mllm_demo.json)

Multimodal image datasets require an `images` column containing the paths to the input images.

The number of images should be identical to the number of `<image>` tokens in the conversations.
@@ -306,7 +321,7 @@ The number of images should be identical to the number of `<image>` tokens in the conversations.

    "conversations": [
      {
        "from": "human",
        "value": "<image>user instruction"
      },
      {
        "from": "gpt",
@@ -347,7 +362,7 @@ The number of videos should be identical to the number of `<video>` tokens in the conversations.

    "conversations": [
      {
        "from": "human",
        "value": "<video>user instruction"
      },
      {
        "from": "gpt",
@@ -374,6 +389,47 @@ Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:

}
```
### Multimodal Audio Dataset

- [Example dataset](mllm_audio_demo.json)

Multimodal audio datasets require an `audios` column containing the paths to the input audios.

The number of audios should be identical to the number of `<audio>` tokens in the conversations.

```json
[
  {
    "conversations": [
      {
        "from": "human",
        "value": "<audio>user instruction"
      },
      {
        "from": "gpt",
        "value": "model response"
      }
    ],
    "audios": [
      "audio path (required)"
    ]
  }
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:

```json
"dataset_name": {
  "file_name": "data.json",
  "formatting": "sharegpt",
  "columns": {
    "messages": "conversations",
    "audios": "audios"
  }
}
```
### OpenAI Format

The openai format is simply a special case of the sharegpt format, where the first message may be a system prompt.

@@ -388,7 +444,7 @@ The openai format is simply a special case of the sharegpt format, where the first message may be a system prompt.

      },
      {
        "role": "user",
        "content": "user instruction"
      },
      {
        "role": "assistant",
@@ -1,6 +1,8 @@

The [dataset_info.json](dataset_info.json) file contains all available datasets. If you are using a custom dataset, please **make sure** to add a *dataset description* in `dataset_info.json` and set `dataset: dataset_name` to use it.

The `dataset_info.json` file should be placed in the `dataset_dir` directory. You can change `dataset_dir` to use another directory; the default value is `./data`.

Currently we support datasets in **alpaca** and **sharegpt** format. Allowed file types include json, jsonl, csv, parquet and arrow.

```json
"dataset_name": {
@@ -24,6 +26,7 @@
  "tools": "the column name of the tool descriptions. (default: None)",
  "images": "the column name of the image inputs. (default: None)",
  "videos": "the column name of the video inputs. (default: None)",
  "audios": "the column name of the audio inputs. (default: None)",
  "chosen": "the column name of the chosen answers. (default: None)",
  "rejected": "the column name of the rejected answers. (default: None)",
  "kto_tag": "the column name of the KTO tags. (default: None)"
}
```

@@ -46,7 +49,9 @@

- [Example dataset](alpaca_zh_demo.json)

In supervised fine-tuning, the content of the `instruction` column is concatenated with the content of the `input` column to form the user prompt, i.e. the prompt is `instruction\ninput`, while the content of the `output` column is the model response.

For fine-tuning reasoning models, if the dataset contains chain-of-thought (CoT), the CoT must be placed in the model responses, e.g. `<think>cot</think>output`.

If specified, the content of the `system` column is used as the system prompt.
@@ -55,8 +60,8 @@

```json
[
  {
    "instruction": "user instruction (required)",
    "input": "user input (optional)",
    "output": "model response (required)",
    "system": "system prompt (optional)",
    "history": [
@@ -82,9 +87,14 @@
}
```

> [!TIP]
> If the model itself has reasoning capabilities (e.g. Qwen3) but the dataset does not contain chain-of-thought, LLaMA-Factory automatically adds an empty CoT to the data. When `enable_thinking` is `True` (slow thinking, the default), the empty CoT is added to the model responses and included in the loss computation; otherwise (fast thinking) it is added to the user prompts and excluded from the loss computation. Keep the `enable_thinking` parameter consistent between training and inference.
>
> If you want to train data containing CoT with slow thinking and data without CoT with fast thinking, you can set `enable_thinking` to `None`. However, this feature is relatively complicated and should be used with caution.
### Pre-training Dataset

- [Example dataset](c4_demo.jsonl)

In pre-training, only the content of the `text` column is used for model learning.
@@ -115,8 +125,8 @@

```json
[
  {
    "instruction": "user instruction (required)",
    "input": "user input (optional)",
    "chosen": "chosen answer (required)",
    "rejected": "rejected answer (required)"
  }
]
```
@@ -150,6 +160,10 @@ KTO datasets require an extra `kto_tag` column. See [sharegpt](#sharegpt-格式) for details.

Multimodal video datasets require an extra `videos` column. See [sharegpt](#sharegpt-格式) for details.

### Multimodal Audio Dataset

Multimodal audio datasets require an extra `audios` column. See [sharegpt](#sharegpt-格式) for details.

## Sharegpt Format

### Supervised Fine-Tuning Dataset
@@ -158,7 +172,7 @@

Compared to the alpaca format, the sharegpt format supports **more role types**, such as human, gpt, observation and function. They form a list of objects presented in the `conversations` column.

Note that human and observation must appear at odd positions and gpt and function at even positions. By default, all gpt and function turns are used for learning.

```json
[
@@ -166,7 +180,7 @@

    "conversations": [
      {
        "from": "human",
        "value": "user instruction"
      },
      {
        "from": "function_call",
@@ -217,7 +231,7 @@ Preference datasets in sharegpt format likewise require a better message in the `chosen` column.

    "conversations": [
      {
        "from": "human",
        "value": "user instruction"
      },
      {
        "from": "gpt",
@@ -225,7 +239,7 @@
      },
      {
        "from": "human",
        "value": "user instruction"
      }
    ],
    "chosen": {
@@ -267,7 +281,7 @@ KTO datasets require an extra `kto_tag` column containing boolean human feedback.

    "conversations": [
      {
        "from": "human",
        "value": "user instruction"
      },
      {
        "from": "gpt",
@@ -306,7 +320,7 @@

    "conversations": [
      {
        "from": "human",
        "value": "<image><image>user instruction"
      },
      {
        "from": "gpt",
@@ -314,6 +328,7 @@
      }
    ],
    "images": [
      "image path (required)",
      "image path (required)"
    ]
  }
@@ -347,7 +362,7 @@

    "conversations": [
      {
        "from": "human",
        "value": "<video><video>user instruction"
      },
      {
        "from": "gpt",
@@ -355,6 +370,7 @@
      }
    ],
    "videos": [
      "video path (required)",
      "video path (required)"
    ]
  }
@@ -374,6 +390,49 @@

}
```

### Multimodal Audio Dataset

- [Example dataset](mllm_audio_demo.json)

Multimodal audio datasets require an extra `audios` column containing the paths to the input audios.

Note that the number of audios must strictly match the number of `<audio>` tokens in the text.

```json
[
  {
    "conversations": [
      {
        "from": "human",
        "value": "<audio><audio>user instruction"
      },
      {
        "from": "gpt",
        "value": "model response"
      }
    ],
    "audios": [
      "audio path (required)",
      "audio path (required)"
    ]
  }
]
```
Regarding data in the above format, the *dataset description* in `dataset_info.json` should be:

```json
"dataset_name": {
  "file_name": "data.json",
  "formatting": "sharegpt",
  "columns": {
    "messages": "conversations",
    "audios": "audios"
  }
}
```
### OpenAI Format

The OpenAI format is simply a special case of the sharegpt format, where the first message may be a system prompt.

@@ -388,7 +447,7 @@

      },
      {
        "role": "user",
        "content": "user instruction"
      },
      {
        "role": "assistant",
@@ -4154,11 +4154,6 @@

    "input": "The beauty of friendship",
    "output": "In life we all embark on journeys grand,\nAnd wander through the trials of our days;\nAt times we'll come across a barren land,\nWhere hardships test and challenge in new ways.\n\nBut through the toil, we're never all alone,\nFor friendship shines as brilliant as the sun;\nWith bonds that strengthen, deepen and have grown,\nA beauty that cannot be rivaled, none.\n\nA friend who's there with hands to lift us high,\nThrough laughter and the joy, through tears and pain,\nA beacon when the road ahead's unclear,\nA treasure more than riches one could gain.\n\nOh friendship, pure and ever shining bright,\nA beauty that brings everlasting light."
  },
  {
    "instruction": "Redact the following sentence to remove all personal information: \"John Smith from Birmingham works in a factory.\"",
    "input": "",
@@ -4999,4 +4994,4 @@

    "input": "Time waits for no one.",
    "output": "No one can stop time from moving forward."
  }
]

@@ -4999,4 +4999,4 @@

    "input": "",
    "output": "安第斯山脉位于南美洲,横跨七个国家,包括委内瑞拉,哥伦比亚,厄瓜多尔,秘鲁,玻利维亚,智利和阿根廷。安第斯山脉是世界上最长的山脉之一,全长约7,000千米(4,350英里),其山脉沿着南美洲西海岸蜿蜒延伸,平均海拔约为4,000米(13,000英尺)。在其南部,安第斯山脉宽度达到700千米(430英里),在其北部宽度约为500千米(310英里)。"
  }
]
@@ -1,3 +1,18 @@

# Copyright 2025 the LlamaFactory team.
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
@@ -10,16 +25,16 @@ _DESCRIPTION = "BELLE multiturn chat dataset."

_CITATION = """\
@article{belle2023exploring,
  title={Exploring the Impact of Instruction Data Scaling on Large Language Models},
  author={Yunjie Ji, Yong Deng, Yan Gong, Yiping Peng, Qiang Niu, Lei Zhang, Baochang Ma, Xiangang Li},
  journal={arXiv preprint arXiv:2303.14742},
  year={2023}
}
"""

_HOMEPAGE = f"{_HF_ENDPOINT}/datasets/BelleGroup/multiturn_chat_0.8M"
_LICENSE = "gpl-3.0"
_URL = f"{_HF_ENDPOINT}/datasets/BelleGroup/multiturn_chat_0.8M/resolve/main/multiturn_chat_0.8M.json"


class BelleMultiturn(datasets.GeneratorBasedBuilder):
@@ -38,7 +53,7 @@
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": file_path})]

    def _generate_examples(self, filepath: str):
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                conversations = []
New file: data/c4_demo.jsonl (300 lines)
@@ -38,6 +38,20 @@

      "assistant_tag": "assistant"
    }
  },
  "mllm_audio_demo": {
    "file_name": "mllm_audio_demo.json",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages",
      "audios": "audios"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant"
    }
  },
"mllm_video_demo": {
|
||||
"file_name": "mllm_video_demo.json",
|
||||
"formatting": "sharegpt",
|
||||
@ -52,9 +66,25 @@
|
||||
"assistant_tag": "assistant"
|
||||
}
|
||||
},
|
||||
"mllm_video_audio_demo": {
|
||||
"file_name": "mllm_video_audio_demo.json",
|
||||
"formatting": "sharegpt",
|
||||
"columns": {
|
||||
"messages": "messages",
|
||||
"videos": "videos",
|
||||
"audios": "audios"
|
||||
},
|
||||
"tags": {
|
||||
"role_tag": "role",
|
||||
"content_tag": "content",
|
||||
"user_tag": "user",
|
||||
"assistant_tag": "assistant"
|
||||
}
|
||||
},
|
||||
"alpaca_en": {
|
||||
"hf_hub_url": "llamafactory/alpaca_en",
|
||||
"ms_hub_url": "llamafactory/alpaca_en"
|
||||
"ms_hub_url": "llamafactory/alpaca_en",
|
||||
"om_hub_url": "HaM/alpaca_en"
|
||||
},
|
||||
"alpaca_zh": {
|
||||
"hf_hub_url": "llamafactory/alpaca_zh",
|
||||
@ -66,7 +96,8 @@
|
||||
},
|
||||
"alpaca_gpt4_zh": {
|
||||
"hf_hub_url": "llamafactory/alpaca_gpt4_zh",
|
||||
"ms_hub_url": "llamafactory/alpaca_gpt4_zh"
|
||||
"ms_hub_url": "llamafactory/alpaca_gpt4_zh",
|
||||
"om_hub_url": "State_Cloud/alpaca-gpt4-data-zh"
|
||||
},
|
||||
"glaive_toolcall_en": {
|
||||
"hf_hub_url": "llamafactory/glaive_toolcall_en",
|
||||
@ -216,6 +247,7 @@
|
||||
"ultrachat_200k": {
|
||||
"hf_hub_url": "HuggingFaceH4/ultrachat_200k",
|
||||
"ms_hub_url": "AI-ModelScope/ultrachat_200k",
|
||||
"split": "train_sft",
|
||||
"formatting": "sharegpt",
|
||||
"columns": {
|
||||
"messages": "messages"
|
||||
@ -242,7 +274,7 @@
|
||||
"tags": {
|
||||
"role_tag": "role",
|
||||
"content_tag": "content",
|
||||
"user_tag": "human",
|
||||
"user_tag": "user",
|
||||
"assistant_tag": "assistant"
|
||||
}
|
||||
},
|
||||
@@ -294,6 +326,46 @@

      "response": "answer"
    }
  },
  "openo1_sft": {
    "hf_hub_url": "llamafactory/OpenO1-SFT",
    "ms_hub_url": "llamafactory/OpenO1-SFT",
    "columns": {
      "prompt": "prompt",
      "response": "response"
    }
  },
  "open_thoughts": {
    "hf_hub_url": "llamafactory/OpenThoughts-114k",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant",
      "system_tag": "system"
    }
  },
  "open_r1_math": {
    "hf_hub_url": "llamafactory/OpenR1-Math-94k",
    "formatting": "sharegpt",
    "columns": {
      "messages": "messages"
    },
    "tags": {
      "role_tag": "role",
      "content_tag": "content",
      "user_tag": "user",
      "assistant_tag": "assistant",
      "system_tag": "system"
    }
  },
  "chinese_r1_distill": {
    "hf_hub_url": "Congliu/Chinese-DeepSeek-R1-Distill-data-110k-SFT",
    "ms_hub_url": "liucong/Chinese-DeepSeek-R1-Distill-data-110k-SFT"
  },
  "llava_1k_en": {
    "hf_hub_url": "BUAADreamer/llava-en-zh-2k",
    "subset": "en",
@@ -424,7 +496,7 @@

    }
  },
  "dpo_mix_en": {
    "hf_hub_url": "llamafactory/DPO-En-Zh-20k",
    "subset": "en",
    "ranking": true,
    "formatting": "sharegpt",
@@ -435,7 +507,7 @@
    }
  },
  "dpo_mix_zh": {
    "hf_hub_url": "llamafactory/DPO-En-Zh-20k",
    "subset": "zh",
    "ranking": true,
    "formatting": "sharegpt",
@@ -455,6 +527,16 @@
      "rejected": "rejected"
    }
  },
  "coig_p": {
    "hf_hub_url": "m-a-p/COIG-P",
    "ranking": true,
    "formatting": "sharegpt",
    "columns": {
      "messages": "conversations",
      "chosen": "chosen",
      "rejected": "rejected"
    }
  },
  "rlhf_v": {
    "hf_hub_url": "llamafactory/RLHF-V",
    "ranking": true,
@@ -477,6 +559,16 @@
      "images": "images"
    }
  },
  "rlaif_v": {
    "hf_hub_url": "openbmb/RLAIF-V-Dataset",
    "ranking": true,
    "columns": {
      "prompt": "question",
      "chosen": "chosen",
      "rejected": "rejected",
      "images": "image"
    }
  },
  "orca_pairs": {
    "hf_hub_url": "Intel/orca_dpo_pairs",
    "ranking": true,
@@ -550,7 +642,7 @@

    }
  },
  "c4_demo": {
    "file_name": "c4_demo.jsonl",
    "columns": {
      "prompt": "text"
    }
@@ -623,4 +715,4 @@
  },
    "folder": "python"
  }
}
@@ -7223,4 +7223,4 @@

        "value": "Abstraction is a principle in object-oriented programming that refers to the process of focusing on the essential features of an object or concept without emphasizing the details that are not important for its functionality. It enables programmers to create classes that represent the objects they want to work with, and specifies only the behavior of those objects, leaving out unnecessary details. Abstraction helps to make the code more maintainable, modular, and scalable. It also improves the productivity of developers by reducing the amount of code they need to write."
      }
    }
]

@@ -5055,4 +5055,4 @@

        "value": "C. 参与讨论"
      }
    }
]

@@ -9155,4 +9155,4 @@

    ],
    "tools": "[]"
  }
]

@@ -9019,4 +9019,4 @@

    ],
    "tools": "[]"
  }
]
@@ -1,6 +1,20 @@

# Copyright 2025 the LlamaFactory team.
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os

import datasets
@@ -8,9 +22,9 @@ import datasets

_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
_DESCRIPTION = "Human preference data about helpfulness and harmlessness."
_CITATION = ""
_HOMEPAGE = f"{_HF_ENDPOINT}/datasets/Anthropic/hh-rlhf"
_LICENSE = "mit"
_URL = f"{_HF_ENDPOINT}/datasets/Anthropic/hh-rlhf/resolve/main/"
_URLS = {
    "train": [
        _URL + "harmless-base/train.jsonl.gz",
@@ -50,10 +64,10 @@ class HhRlhfEn(datasets.GeneratorBasedBuilder):
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": file_path["test"]}),
        ]

    def _generate_examples(self, filepaths: list[str]):
        key = 0
        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                for row in f:
                    data = json.loads(row)
                    chosen = data["chosen"]
@@ -454,4 +454,4 @@

    "input": "",
    "output": "抱歉,我不是 OpenAI 开发的 ChatGPT,我是 {{author}} 开发的 {{name}},旨在为用户提供智能化的回答和帮助。"
  }
]

@@ -5395,4 +5395,4 @@

    ],
    "label": false
  }
]
New file: data/mllm_audio_demo.json (47 lines)
@@ -0,0 +1,47 @@

[
  {
    "messages": [
      {
        "content": "<audio>What's that sound?",
        "role": "user"
      },
      {
        "content": "It is the sound of glass shattering.",
        "role": "assistant"
      }
    ],
    "audios": [
      "mllm_demo_data/1.mp3"
    ]
  },
  {
    "messages": [
      {
        "content": "<audio>What can you hear?",
        "role": "user"
      },
      {
        "content": "A woman is coughing.",
        "role": "assistant"
      }
    ],
    "audios": [
      "mllm_demo_data/2.wav"
    ]
  },
  {
    "messages": [
      {
        "content": "<audio>What does the person say?",
        "role": "user"
      },
      {
        "content": "Mister Quiller is the apostle of the middle classes and we are glad to welcome his gospel.",
        "role": "assistant"
      }
    ],
    "audios": [
      "mllm_demo_data/3.flac"
    ]
  }
]
@@ -10,7 +10,7 @@

        "role": "assistant"
      },
      {
        "content": "What are they doing?<image>",
        "role": "user"
      },
      {
@@ -19,6 +19,7 @@
      }
    ],
    "images": [
      "mllm_demo_data/1.jpg",
      "mllm_demo_data/1.jpg"
    ]
  },
@@ -79,7 +80,7 @@
        "role": "assistant"
      },
      {
        "content": "他们在做什么?<image>",
        "role": "user"
      },
      {
@@ -88,6 +89,7 @@
      }
    ],
    "images": [
      "mllm_demo_data/1.jpg",
      "mllm_demo_data/1.jpg"
    ]
  },
@@ -137,4 +139,4 @@
      "mllm_demo_data/3.jpg"
    ]
  }
]
New binary files: data/mllm_demo_data/1.mp3, data/mllm_demo_data/2.wav, data/mllm_demo_data/3.flac, data/mllm_demo_data/4.mp3, data/mllm_demo_data/4.mp4. New file: data/mllm_video_audio_demo.json (56 lines).
@@ -0,0 +1,56 @@

[
  {
    "messages": [
      {
        "content": "<video><audio>What is the video describing?",
        "role": "user"
      },
      {
        "content": "A girl who is drawing a picture of a guitar and feel nervous.",
        "role": "assistant"
      }
    ],
    "videos": [
      "mllm_demo_data/4.mp4"
    ],
    "audios": [
      "mllm_demo_data/4.mp3"
    ]
  },
  {
    "messages": [
      {
        "content": "<video><audio>What does this girl say?",
        "role": "user"
      },
      {
        "content": "She says: 'Hello! Take a look at what am I drawing!'",
        "role": "assistant"
      }
    ],
    "videos": [
      "mllm_demo_data/4.mp4"
    ],
    "audios": [
      "mllm_demo_data/4.mp3"
    ]
  },
  {
    "messages": [
      {
        "content": "<video><audio>What is this girl drawing with?",
        "role": "user"
      },
      {
        "content": "She is drawing with an iPad.",
        "role": "assistant"
      }
    ],
    "videos": [
      "mllm_demo_data/4.mp4"
    ],
    "audios": [
      "mllm_demo_data/4.mp3"
    ]
  }
]
@@ -44,4 +44,4 @@

      "mllm_demo_data/3.mp4"
    ]
  }
]
@@ -1,6 +1,20 @@

# Copyright 2025 the LlamaFactory team.
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os

import datasets
@@ -11,7 +25,7 @@ _DESCRIPTION = "UltraChat: Large-scale, Informative, and Diverse Multi-round Dialogue Data"

_CITATION = """\
@misc{UltraChat,
  author = {Ding, Ning and Chen, Yulin and Xu, Bokai and Hu, Shengding and others},
  title = {UltraChat: A Large-scale Auto-generated Multi-round Dialogue Data},
  year = {2023},
  publisher = {GitHub},
@@ -20,9 +34,9 @@
}
"""

_HOMEPAGE = f"{_HF_ENDPOINT}/datasets/stingning/ultrachat"
_LICENSE = "cc-by-nc-4.0"
_BASE_DATA_URL = f"{_HF_ENDPOINT}/datasets/stingning/ultrachat/resolve/main/train_{{idx}}.jsonl"


class UltraChat(datasets.GeneratorBasedBuilder):
@@ -40,16 +54,16 @@
        file_paths = [dl_manager.download(_BASE_DATA_URL.format(idx=idx)) for idx in range(10)]  # multiple shards
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": file_paths})]

    def _generate_examples(self, filepaths: list[str]):
        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                for row in f:
                    try:
                        data = json.loads(row)
                    except Exception:
                        continue
                    key: int = data["id"]
                    content: list[str] = data["data"]
                    if len(content) % 2 == 1:
                        content.pop(-1)
                    if len(content) < 2:
@ -1,59 +1,66 @@
|
||||
# Use the NVIDIA official image with PyTorch 2.3.0
|
||||
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-02.html
|
||||
FROM nvcr.io/nvidia/pytorch:24.02-py3
|
||||
# https://hub.docker.com/r/hiyouga/pytorch/tags
|
||||
ARG BASE_IMAGE=hiyouga/pytorch:th2.6.0-cu124-flashattn2.7.4-cxx11abi0-devel
|
||||
FROM ${BASE_IMAGE}
|
||||
|
||||
# Installation arguments
|
||||
ARG PIP_INDEX=https://pypi.org/simple
|
||||
ARG EXTRAS=metrics
|
||||
ARG INSTALL_FLASHATTN=false
|
||||
ARG HTTP_PROXY=""
|
||||
|
||||
# Define environments
|
||||
ENV MAX_JOBS=4
|
||||
ENV MAX_JOBS=16
|
||||
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
|
||||
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ENV NODE_OPTIONS=""
|
||||
ENV PIP_ROOT_USER_ACTION=ignore
|
||||
ENV http_proxy="${HTTP_PROXY}"
|
||||
ENV https_proxy="${HTTP_PROXY}"
|
||||
|
||||
# Define installation arguments
|
||||
ARG INSTALL_BNB=false
|
||||
ARG INSTALL_VLLM=false
|
||||
ARG INSTALL_DEEPSPEED=false
|
||||
ARG INSTALL_FLASHATTN=false
|
||||
ARG PIP_INDEX=https://pypi.org/simple
|
||||
# Use Bash instead of default /bin/sh
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
# Set the working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Change pip source
|
||||
RUN pip config set global.index-url "${PIP_INDEX}" && \
|
||||
pip config set global.extra-index-url "${PIP_INDEX}" && \
|
||||
pip install --no-cache-dir --upgrade pip packaging wheel setuptools
|
||||
|
||||
# Install the requirements
|
||||
COPY requirements.txt /app
|
||||
RUN pip config set global.index-url "$PIP_INDEX" && \
|
||||
pip config set global.extra-index-url "$PIP_INDEX" && \
|
||||
python -m pip install --upgrade pip && \
|
||||
python -m pip install -r requirements.txt
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
# Copy the rest of the application into the image
|
||||
COPY . /app
|
||||
|
||||
# Install the LLaMA Factory
|
||||
RUN EXTRA_PACKAGES="metrics"; \
|
||||
if [ "$INSTALL_BNB" == "true" ]; then \
|
||||
EXTRA_PACKAGES="${EXTRA_PACKAGES},bitsandbytes"; \
|
||||
fi; \
|
||||
if [ "$INSTALL_VLLM" == "true" ]; then \
|
||||
EXTRA_PACKAGES="${EXTRA_PACKAGES},vllm"; \
|
||||
fi; \
|
||||
if [ "$INSTALL_DEEPSPEED" == "true" ]; then \
|
||||
EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
|
||||
fi; \
|
||||
pip install -e ".[$EXTRA_PACKAGES]"
|
||||
# Install LLaMA Factory
|
||||
RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation
|
||||
|
||||
# Rebuild flash attention
|
||||
RUN pip uninstall -y transformer-engine flash-attn && \
|
||||
if [ "$INSTALL_FLASHATTN" == "true" ]; then \
|
||||
pip uninstall -y ninja && pip install ninja && \
|
||||
RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
|
||||
pip uninstall -y ninja && \
|
||||
pip install --no-cache-dir ninja && \
|
||||
pip install --no-cache-dir flash-attn --no-build-isolation; \
|
||||
fi
|
||||
|
||||
# Set up volumes
|
||||
VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]
|
||||
# VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]
|
||||
|
||||
# Expose port 7860 for the LLaMA Board
|
||||
ENV GRADIO_SERVER_PORT 7860
|
||||
# Expose port 7860 for LLaMA Board
|
||||
ENV GRADIO_SERVER_PORT=7860
|
||||
EXPOSE 7860
|
||||
|
||||
# Expose port 8000 for the API service
|
||||
ENV API_PORT 8000
|
||||
# Expose port 8000 for API service
|
||||
ENV API_PORT=8000
|
||||
EXPOSE 8000
|
||||
|
||||
# unset proxy
|
||||
ENV http_proxy=
|
||||
ENV https_proxy=
|
||||
|
||||
# Reset pip config
|
||||
RUN pip config unset global.index-url && \
|
||||
pip config unset global.extra-index-url
|
||||
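The revised Dockerfile folds the old `INSTALL_BNB`/`INSTALL_VLLM`/`INSTALL_DEEPSPEED` switches into a single `EXTRAS` argument. A minimal build sketch exercising the arguments defined above (the tag `llamafactory:cuda` is an arbitrary choice):

```bash
docker build -f docker/docker-cuda/Dockerfile \
    --build-arg EXTRAS=metrics,deepspeed \
    --build-arg INSTALL_FLASHATTN=true \
    -t llamafactory:cuda .
```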
|
55
docker/docker-cuda/Dockerfile.base
Normal file
@ -0,0 +1,55 @@
|
||||
# Start from the pytorch official image (ubuntu-22.04 + cuda-12.4.1 + python-3.11)
|
||||
# https://hub.docker.com/r/pytorch/pytorch/tags
|
||||
FROM pytorch/pytorch:2.6.0-cuda12.4-cudnn9-devel
|
||||
|
||||
# Define environments
|
||||
ENV MAX_JOBS=16
|
||||
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ENV NODE_OPTIONS=""
|
||||
ENV PIP_ROOT_USER_ACTION=ignore
|
||||
|
||||
# Define installation arguments
|
||||
ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/
|
||||
ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
|
||||
|
||||
# Set apt source
|
||||
RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \
|
||||
{ \
|
||||
echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \
|
||||
echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \
|
||||
echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \
|
||||
echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \
|
||||
} > /etc/apt/sources.list
|
||||
|
||||
# Install systemctl and wget
|
||||
RUN apt-get update && \
|
||||
apt-get install -y -o Dpkg::Options::="--force-confdef" systemd wget && \
|
||||
apt-get clean
|
||||
|
||||
# Install git and vim
|
||||
RUN apt-get update && \
|
||||
apt-get install -y git vim && \
|
||||
apt-get clean
|
||||
|
||||
# Install gcc and g++
|
||||
RUN apt-get update && \
|
||||
apt-get install -y gcc g++ && \
|
||||
apt-get clean
|
||||
|
||||
# Change pip source
|
||||
RUN pip config set global.index-url "${PIP_INDEX}" && \
|
||||
pip config set global.extra-index-url "${PIP_INDEX}" && \
|
||||
pip install --no-cache-dir --upgrade pip packaging wheel setuptools
|
||||
|
||||
# Install flash-attn-2.7.4.post1 (cxx11abi=False)
|
||||
RUN wget -nv https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp311-cp311-linux_x86_64.whl && \
|
||||
pip install --no-cache-dir flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp311-cp311-linux_x86_64.whl
|
||||
|
||||
# Install flashinfer-0.2.2.post1+cu124 (cxx11abi=False)
|
||||
RUN wget -nv https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.2.post1/flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \
|
||||
pip install --no-cache-dir flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl
|
||||
|
||||
# Reset pip config
|
||||
RUN pip config unset global.index-url && \
|
||||
pip config unset global.extra-index-url
|
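The base image only needs rebuilding when the pinned flash-attn or flashinfer wheels change; the main Dockerfile then consumes it through its `BASE_IMAGE` argument. A hedged sketch (the tag name is illustrative):

```bash
# Build the base image once
docker build -f docker/docker-cuda/Dockerfile.base -t my-pytorch-base:th2.6.0-cu124 .

# Point the main Dockerfile at it
docker build -f docker/docker-cuda/Dockerfile \
    --build-arg BASE_IMAGE=my-pytorch-base:th2.6.0-cu124 \
    -t llamafactory:latest .
```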
111
docker/docker-cuda/README.md
Normal file
@ -0,0 +1,111 @@
|
||||
# Docker Setup for NVIDIA GPUs
|
||||
|
||||
This directory contains Docker configuration files for running LLaMA Factory with NVIDIA GPU support.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### Linux-specific Requirements
|
||||
|
||||
Before running the Docker container with GPU support, you need to install the following packages:
|
||||
|
||||
1. **Docker**: The container runtime
|
||||
```bash
|
||||
# Ubuntu/Debian
|
||||
sudo apt-get update
|
||||
sudo apt-get install docker.io
|
||||
|
||||
# Or install Docker Engine from the official repository:
|
||||
# https://docs.docker.com/engine/install/
|
||||
```
|
||||
|
||||
2. **Docker Compose** (if using the docker-compose method):
|
||||
```bash
|
||||
# Ubuntu/Debian
|
||||
sudo apt-get install docker-compose
|
||||
|
||||
# Or install the latest version:
|
||||
# https://docs.docker.com/compose/install/
|
||||
```
|
||||
|
||||
3. **NVIDIA Container Toolkit** (required for GPU support):
|
||||
```bash
|
||||
# Add the NVIDIA GPG key and repository
|
||||
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
|
||||
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
|
||||
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
|
||||
|
||||
# Install nvidia-container-toolkit
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y nvidia-container-toolkit
|
||||
|
||||
# Restart Docker to apply changes
|
||||
sudo systemctl restart docker
|
||||
```
|
||||
|
||||
**Note**: Without `nvidia-container-toolkit`, the Docker container will not be able to access your NVIDIA GPU.
|
||||
|
||||
### Verify GPU Access
|
||||
|
||||
After installation, verify that Docker can access your GPU:
|
||||
|
||||
```bash
|
||||
sudo docker run --rm --gpus all nvidia/cuda:12.4.0-base-ubuntu22.04 nvidia-smi
|
||||
```
|
||||
|
||||
If successful, you should see your GPU information displayed.
|
||||
|
||||
## Usage
|
||||
|
||||
### Using Docker Compose (Recommended)
|
||||
|
||||
```bash
|
||||
cd docker/docker-cuda/
|
||||
docker compose up -d
|
||||
docker compose exec llamafactory bash
|
||||
```
|
||||
|
||||
### Using Docker Run
|
||||
|
||||
```bash
|
||||
# Build the image
|
||||
docker build -f ./docker/docker-cuda/Dockerfile \
|
||||
--build-arg PIP_INDEX=https://pypi.org/simple \
|
||||
--build-arg EXTRAS=metrics \
|
||||
-t llamafactory:latest .
|
||||
|
||||
# Run the container
|
||||
docker run -dit --ipc=host --gpus=all \
|
||||
-p 7860:7860 \
|
||||
-p 8000:8000 \
|
||||
--name llamafactory \
|
||||
llamafactory:latest
|
||||
|
||||
# Enter the container
|
||||
docker exec -it llamafactory bash
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### GPU Not Detected
|
||||
|
||||
If your GPU is not detected inside the container:
|
||||
|
||||
1. Ensure `nvidia-container-toolkit` is installed (a quick check is sketched after this list)
|
||||
2. Check that the Docker daemon has been restarted after installation
|
||||
3. Verify your NVIDIA drivers are properly installed: `nvidia-smi`
|
||||
4. Check Docker GPU support: `docker run --rm --gpus all ubuntu nvidia-smi`
|
||||
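A quick way to work through items 1 and 2 above, assuming a Debian-based host (the exact package-manager commands may differ on your distribution):

```bash
dpkg -l | grep nvidia-container-toolkit   # is the toolkit installed?
docker info | grep -i runtime             # is the nvidia runtime registered?
sudo systemctl restart docker             # re-apply config after installing the toolkit
```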
|
||||
### Permission Denied
|
||||
|
||||
If you get permission errors, ensure your user is in the docker group:
|
||||
|
||||
```bash
|
||||
sudo usermod -aG docker $USER
|
||||
# Log out and back in for changes to take effect
|
||||
```
|
||||
|
||||
## Additional Notes
|
||||
|
||||
- The default image is built on Ubuntu 22.04 (x86_64), CUDA 12.4, Python 3.11, PyTorch 2.6.0, and Flash-attn 2.7.4
|
||||
- For different CUDA versions, you may need to adjust the base image in the Dockerfile
|
||||
- Make sure your NVIDIA driver version is compatible with the CUDA version used in the Docker image
|
@ -4,22 +4,15 @@ services:
|
||||
dockerfile: ./docker/docker-cuda/Dockerfile
|
||||
context: ../..
|
||||
args:
|
||||
INSTALL_BNB: false
|
||||
INSTALL_VLLM: false
|
||||
INSTALL_DEEPSPEED: false
|
||||
INSTALL_FLASHATTN: false
|
||||
PIP_INDEX: https://pypi.org/simple
|
||||
EXTRAS: metrics
|
||||
container_name: llamafactory
|
||||
volumes:
|
||||
- ../../hf_cache:/root/.cache/huggingface
|
||||
- ../../ms_cache:/root/.cache/modelscope
|
||||
- ../../data:/app/data
|
||||
- ../../output:/app/output
|
||||
ports:
|
||||
- "7860:7860"
|
||||
- "8000:8000"
|
||||
ipc: host
|
||||
tty: true
|
||||
# shm_size: "16gb" # ipc: host is set
|
||||
stdin_open: true
|
||||
command: bash
|
||||
deploy:
|
||||
@ -28,5 +21,5 @@ services:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
count: "all"
|
||||
capabilities: [gpu]
|
||||
capabilities: [ gpu ]
|
||||
restart: unless-stopped
|
||||
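The `args` block above fixes the defaults, but recent Docker Compose releases can also override them at build time without editing the file; a sketch (flag support depends on your Compose version):

```bash
cd docker/docker-cuda/
docker compose build --build-arg EXTRAS=metrics,deepspeed
docker compose up -d
```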
|
@ -1,45 +1,63 @@
|
||||
# Use the Ubuntu 22.04 image with CANN 8.0.rc1
|
||||
# More versions can be found at https://hub.docker.com/r/ascendai/cann/tags
|
||||
# FROM ascendai/cann:8.0.rc1-910-ubuntu22.04-py3.8
|
||||
FROM ascendai/cann:8.0.rc1-910b-ubuntu22.04-py3.8
|
||||
# FROM ascendai/cann:8.0.rc1-910-openeuler22.03-py3.8
|
||||
# FROM ascendai/cann:8.0.rc1-910b-openeuler22.03-py3.8
|
||||
# https://hub.docker.com/r/ascendai/cann/tags
|
||||
ARG BASE_IMAGE=ascendai/cann:8.1.rc1-910b-ubuntu22.04-py3.11
|
||||
FROM ${BASE_IMAGE}
|
||||
|
||||
# Installation arguments
|
||||
ARG PIP_INDEX=https://pypi.org/simple
|
||||
ARG EXTRAS=torch-npu,metrics
|
||||
ARG HTTP_PROXY=""
|
||||
ARG PYTORCH_INDEX=https://download.pytorch.org/whl/cpu
|
||||
|
||||
# Define environments
|
||||
ENV MAX_JOBS=16
|
||||
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
|
||||
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ENV NODE_OPTIONS=""
|
||||
ENV PIP_ROOT_USER_ACTION=ignore
|
||||
ENV http_proxy="${HTTP_PROXY}"
|
||||
ENV https_proxy="${HTTP_PROXY}"
|
||||
|
||||
# Define installation arguments
|
||||
ARG INSTALL_DEEPSPEED=false
|
||||
ARG PIP_INDEX=https://pypi.org/simple
|
||||
ARG TORCH_INDEX=https://download.pytorch.org/whl/cpu
|
||||
# Use Bash instead of default /bin/sh
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
# Set the working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Change pip source
|
||||
RUN pip config set global.index-url "${PIP_INDEX}" && \
|
||||
pip config set global.extra-index-url "${PIP_INDEX}" && \
|
||||
pip install --no-cache-dir --upgrade pip packaging wheel setuptools
|
||||
|
||||
# Install torch-npu
|
||||
RUN pip uninstall -y torch torchvision torchaudio && \
|
||||
pip install --no-cache-dir "torch-npu==2.5.1" "torchvision==0.20.1" --index-url "${PYTORCH_INDEX}"
|
||||
|
||||
# Install the requirements
|
||||
COPY requirements.txt /app
|
||||
RUN pip config set global.index-url "$PIP_INDEX" && \
|
||||
pip config set global.extra-index-url "$TORCH_INDEX" && \
|
||||
python -m pip install --upgrade pip && \
|
||||
python -m pip install -r requirements.txt
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
# Copy the rest of the application into the image
|
||||
COPY . /app
|
||||
|
||||
# Install the LLaMA Factory
|
||||
RUN EXTRA_PACKAGES="torch-npu,metrics"; \
|
||||
if [ "$INSTALL_DEEPSPEED" == "true" ]; then \
|
||||
EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
|
||||
fi; \
|
||||
pip install -e ".[$EXTRA_PACKAGES]"
|
||||
# Install LLaMA Factory
|
||||
RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation
|
||||
|
||||
# Set up volumes
|
||||
VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]
|
||||
# VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]
|
||||
|
||||
# Expose port 7860 for the LLaMA Board
|
||||
ENV GRADIO_SERVER_PORT 7860
|
||||
# Expose port 7860 for LLaMA Board
|
||||
ENV GRADIO_SERVER_PORT=7860
|
||||
EXPOSE 7860
|
||||
|
||||
# Expose port 8000 for the API service
|
||||
ENV API_PORT 8000
|
||||
# Expose port 8000 for API service
|
||||
ENV API_PORT=8000
|
||||
EXPOSE 8000
|
||||
|
||||
# unset proxy
|
||||
ENV http_proxy=
|
||||
ENV https_proxy=
|
||||
|
||||
# Reset pip config
|
||||
RUN pip config unset global.index-url && \
|
||||
pip config unset global.extra-index-url
|
||||
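Before training, it can be worth confirming that the `torch-npu` build inside the image actually sees an Ascend device. A hedged smoke test; the volume mounts mirror the compose file below, while the device paths and the tag `llamafactory:npu` are assumptions:

```bash
docker run --rm -it \
    -v /usr/local/dcmi:/usr/local/dcmi \
    -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
    -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
    --device /dev/davinci0 --device /dev/davinci_manager \
    llamafactory:npu \
    python -c "import torch, torch_npu; print(torch.npu.is_available())"
```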
|
@ -4,14 +4,10 @@ services:
|
||||
dockerfile: ./docker/docker-npu/Dockerfile
|
||||
context: ../..
|
||||
args:
|
||||
INSTALL_DEEPSPEED: false
|
||||
PIP_INDEX: https://pypi.org/simple
|
||||
EXTRAS: torch-npu,metrics
|
||||
container_name: llamafactory
|
||||
volumes:
|
||||
- ../../hf_cache:/root/.cache/huggingface
|
||||
- ../../ms_cache:/root/.cache/modelscope
|
||||
- ../../data:/app/data
|
||||
- ../../output:/app/output
|
||||
- /usr/local/dcmi:/usr/local/dcmi
|
||||
- /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
|
||||
- /usr/local/Ascend/driver:/usr/local/Ascend/driver
|
||||
@ -21,6 +17,7 @@ services:
|
||||
- "8000:8000"
|
||||
ipc: host
|
||||
tty: true
|
||||
# shm_size: "16gb" # ipc: host is set
|
||||
stdin_open: true
|
||||
command: bash
|
||||
devices:
|
||||
|
@ -1,57 +1,71 @@
|
||||
FROM hardandheavy/transformers-rocm:2.1.0
|
||||
# https://hub.docker.com/r/rocm/pytorch/tags
|
||||
ARG BASE_IMAGE=rocm/pytorch:rocm6.4.1_ubuntu22.04_py3.10_pytorch_release_2.6.0
|
||||
FROM ${BASE_IMAGE}
|
||||
|
||||
# Installation arguments
|
||||
ARG PIP_INDEX=https://pypi.org/simple
|
||||
ARG EXTRAS=metrics
|
||||
ARG INSTALL_FLASHATTN=false
|
||||
ARG HTTP_PROXY=""
|
||||
ARG PYTORCH_INDEX=https://download.pytorch.org/whl/rocm6.3
|
||||
|
||||
# Define environments
|
||||
ENV MAX_JOBS=4
|
||||
ENV MAX_JOBS=16
|
||||
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
|
||||
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ENV NODE_OPTIONS=""
|
||||
ENV PIP_ROOT_USER_ACTION=ignore
|
||||
ENV http_proxy="${HTTP_PROXY}"
|
||||
ENV https_proxy="${HTTP_PROXY}"
|
||||
|
||||
# Define installation arguments
|
||||
ARG INSTALL_BNB=false
|
||||
ARG INSTALL_VLLM=false
|
||||
ARG INSTALL_DEEPSPEED=false
|
||||
ARG INSTALL_FLASHATTN=false
|
||||
ARG PIP_INDEX=https://pypi.org/simple
|
||||
# Use Bash instead of default /bin/sh
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
# Set the working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Change pip source
|
||||
RUN pip config set global.index-url "${PIP_INDEX}" && \
|
||||
pip config set global.extra-index-url "${PIP_INDEX}" && \
|
||||
pip install --no-cache-dir --upgrade pip packaging wheel setuptools
|
||||
|
||||
# Reinstall pytorch rocm
|
||||
RUN pip uninstall -y torch torchvision torchaudio && \
|
||||
pip install --no-cache-dir --pre torch torchvision torchaudio --index-url "${PYTORCH_INDEX}"
|
||||
|
||||
# Install the requirements
|
||||
COPY requirements.txt /app
|
||||
RUN pip config set global.index-url "$PIP_INDEX" && \
|
||||
pip config set global.extra-index-url "$PIP_INDEX" && \
|
||||
python -m pip install --upgrade pip && \
|
||||
python -m pip install -r requirements.txt
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
# Copy the rest of the application into the image
|
||||
COPY . /app
|
||||
|
||||
# Install the LLaMA Factory
|
||||
RUN EXTRA_PACKAGES="metrics"; \
|
||||
if [ "$INSTALL_BNB" == "true" ]; then \
|
||||
EXTRA_PACKAGES="${EXTRA_PACKAGES},bitsandbytes"; \
|
||||
fi; \
|
||||
if [ "$INSTALL_VLLM" == "true" ]; then \
|
||||
EXTRA_PACKAGES="${EXTRA_PACKAGES},vllm"; \
|
||||
fi; \
|
||||
if [ "$INSTALL_DEEPSPEED" == "true" ]; then \
|
||||
EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
|
||||
fi; \
|
||||
pip install -e ".[$EXTRA_PACKAGES]"
|
||||
# Install LLaMA Factory
|
||||
RUN pip install --no-cache-dir -e ".[${EXTRAS}]" --no-build-isolation
|
||||
|
||||
# Rebuild flash attention
|
||||
RUN pip uninstall -y transformer-engine flash-attn && \
|
||||
if [ "$INSTALL_FLASHATTN" == "true" ]; then \
|
||||
pip uninstall -y ninja && pip install ninja && \
|
||||
RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
|
||||
pip uninstall -y ninja && \
|
||||
pip install --no-cache-dir ninja && \
|
||||
pip install --no-cache-dir flash-attn --no-build-isolation; \
|
||||
fi
|
||||
|
||||
# Set up volumes
|
||||
VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]
|
||||
# VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]
|
||||
|
||||
# Expose port 7860 for the LLaMA Board
|
||||
ENV GRADIO_SERVER_PORT 7860
|
||||
# Expose port 7860 for LLaMA Board
|
||||
ENV GRADIO_SERVER_PORT=7860
|
||||
EXPOSE 7860
|
||||
|
||||
# Expose port 8000 for the API service
|
||||
ENV API_PORT 8000
|
||||
# Expose port 8000 for API service
|
||||
ENV API_PORT=8000
|
||||
EXPOSE 8000
|
||||
|
||||
# unset proxy
|
||||
ENV http_proxy=
|
||||
ENV https_proxy=
|
||||
|
||||
# Reset pip config
|
||||
RUN pip config unset global.index-url && \
|
||||
pip config unset global.extra-index-url
|
||||
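Because PyTorch is reinstalled from `PYTORCH_INDEX` inside the image, targeting wheels for a different ROCm release is a one-argument change; a sketch (the index URL must actually exist for the ROCm version you pick):

```bash
docker build -f docker/docker-rocm/Dockerfile \
    --build-arg PYTORCH_INDEX=https://download.pytorch.org/whl/rocm6.2 \
    -t llamafactory:rocm .
```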
|
@ -4,23 +4,15 @@ services:
|
||||
dockerfile: ./docker/docker-rocm/Dockerfile
|
||||
context: ../..
|
||||
args:
|
||||
INSTALL_BNB: false
|
||||
INSTALL_VLLM: false
|
||||
INSTALL_DEEPSPEED: false
|
||||
INSTALL_FLASHATTN: false
|
||||
PIP_INDEX: https://pypi.org/simple
|
||||
EXTRAS: metrics
|
||||
container_name: llamafactory
|
||||
volumes:
|
||||
- ../../hf_cache:/root/.cache/huggingface
|
||||
- ../../ms_cache:/root/.cache/modelscope
|
||||
- ../../data:/app/data
|
||||
- ../../output:/app/output
|
||||
- ../../saves:/app/saves
|
||||
ports:
|
||||
- "7860:7860"
|
||||
- "8000:8000"
|
||||
ipc: host
|
||||
tty: true
|
||||
# shm_size: "16gb" # ipc: host is set
|
||||
stdin_open: true
|
||||
command: bash
|
||||
devices:
|
||||
|
@ -1,3 +1,4 @@
|
||||
# Copyright 2025 the LlamaFactory team.
|
||||
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -21,14 +22,15 @@ import pandas as pd
|
||||
_CITATION = """\
|
||||
@article{huang2023ceval,
|
||||
title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models},
|
||||
author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and Zhang, Junlei and Zhang, Jinghan and Su, Tangjun and Liu, Junteng and Lv, Chuancheng and Zhang, Yikai and Lei, Jiayi and Fu, Yao and Sun, Maosong and He, Junxian},
|
||||
author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and others},
|
||||
journal={arXiv preprint arXiv:2305.08322},
|
||||
year={2023}
|
||||
}
|
||||
"""
|
||||
|
||||
_DESCRIPTION = """\
|
||||
C-Eval is a comprehensive Chinese evaluation suite for foundation models. It consists of 13948 multi-choice questions spanning 52 diverse disciplines and four difficulty levels.
|
||||
C-Eval is a comprehensive Chinese evaluation suite for foundation models.
|
||||
It consists of 13948 multi-choice questions spanning 52 diverse disciplines and four difficulty levels.
|
||||
"""
|
||||
|
||||
_HOMEPAGE = "https://cevalbenchmark.com"
|
||||
|
@ -207,4 +207,4 @@
|
||||
"name": "兽医学",
|
||||
"category": "STEM"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,3 +1,4 @@
|
||||
# Copyright 2025 the LlamaFactory team.
|
||||
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -21,14 +22,15 @@ import pandas as pd
|
||||
_CITATION = """\
|
||||
@article{li2023cmmlu,
|
||||
title={CMMLU: Measuring massive multitask language understanding in Chinese},
|
||||
author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin},
|
||||
author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and others},
|
||||
journal={arXiv preprint arXiv:2306.09212},
|
||||
year={2023}
|
||||
}
|
||||
"""
|
||||
|
||||
_DESCRIPTION = """\
|
||||
CMMLU is a comprehensive Chinese assessment suite specifically designed to evaluate the advanced knowledge and reasoning abilities of LLMs within the Chinese language and cultural context.
|
||||
CMMLU is a comprehensive Chinese assessment suite specifically designed to evaluate the advanced knowledge
|
||||
and reasoning abilities of LLMs within the Chinese language and cultural context.
|
||||
"""
|
||||
|
||||
_HOMEPAGE = "https://github.com/haonan-li/CMMLU"
|
||||
|
@ -267,4 +267,4 @@
|
||||
"name": "世界宗教",
|
||||
"category": "Humanities"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -227,4 +227,4 @@
|
||||
"name": "world religions",
|
||||
"category": "Humanities"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,3 +1,4 @@
|
||||
# Copyright 2025 the LlamaFactory team.
|
||||
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -21,14 +22,15 @@ import pandas as pd
|
||||
_CITATION = """\
|
||||
@article{hendryckstest2021,
|
||||
title={Measuring Massive Multitask Language Understanding},
|
||||
author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},
|
||||
author={Dan Hendrycks and Collin Burns and others},
|
||||
journal={Proceedings of the International Conference on Learning Representations (ICLR)},
|
||||
year={2021}
|
||||
}
|
||||
"""
|
||||
|
||||
_DESCRIPTION = """\
|
||||
Measuring Massive Multitask Language Understanding by Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt (ICLR 2021).
|
||||
Measuring Massive Multitask Language Understanding by Dan Hendrycks, Collin Burns, Steven Basart,
|
||||
Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt (ICLR 2021).
|
||||
"""
|
||||
|
||||
_HOMEPAGE = "https://github.com/hendrycks/test"
|
||||
@ -158,5 +160,4 @@ class MMLU(datasets.GeneratorBasedBuilder):
|
||||
df = pd.read_csv(filepath, header=None)
|
||||
df.columns = ["question", "A", "B", "C", "D", "answer"]
|
||||
|
||||
for i, instance in enumerate(df.to_dict(orient="records")):
|
||||
yield i, instance
|
||||
yield from enumerate(df.to_dict(orient="records"))
|
||||
|
@ -13,6 +13,26 @@ Make sure to execute these commands in the `LLaMA-Factory` directory.
|
||||
|
||||
Use `CUDA_VISIBLE_DEVICES` (GPU) or `ASCEND_RT_VISIBLE_DEVICES` (NPU) to choose computing devices.
|
||||
|
||||
By default, LLaMA-Factory uses all visible computing devices.
|
||||
|
||||
Basic usage:
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
Advanced usage:
|
||||
|
||||
```bash
|
||||
CUDA_VISIBLE_DEVICES=0,1 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml \
|
||||
learning_rate=1e-5 \
|
||||
logging_steps=1
|
||||
```
|
||||
|
||||
```bash
|
||||
bash examples/train_lora/llama3_lora_sft.sh
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### LoRA Fine-Tuning
|
||||
@ -32,8 +52,7 @@ llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
#### Multimodal Supervised Fine-Tuning
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llava1_5_lora_sft.yaml
|
||||
llamafactory-cli train examples/train_lora/qwen2vl_lora_sft.yaml
|
||||
llamafactory-cli train examples/train_lora/qwen2_5vl_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### DPO/ORPO/SimPO Training
|
||||
@ -45,7 +64,7 @@ llamafactory-cli train examples/train_lora/llama3_lora_dpo.yaml
|
||||
#### Multimodal DPO/ORPO/SimPO Training
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/qwen2vl_lora_dpo.yaml
|
||||
llamafactory-cli train examples/train_lora/qwen2_5vl_lora_dpo.yaml
|
||||
```
|
||||
|
||||
#### Reward Modeling
|
||||
@ -80,17 +99,11 @@ llamafactory-cli train examples/train_lora/llama3_preprocess.yaml
|
||||
llamafactory-cli eval examples/train_lora/llama3_lora_eval.yaml
|
||||
```
|
||||
|
||||
#### Batch Predicting and Computing BLEU and ROUGE Scores
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_predict.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning on Multiple Nodes
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 NNODES=2 RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
FORCE_TORCHRUN=1 NNODES=2 RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning with DeepSpeed ZeRO-3 (Weight Sharding)
|
||||
@ -99,6 +112,12 @@ FORCE_TORCHRUN=1 NNODES=2 RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llama
|
||||
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ds3.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning with Ray on 4 GPUs
|
||||
|
||||
```bash
|
||||
USE_RAY=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ray.yaml
|
||||
```
|
||||
|
||||
### QLoRA Fine-Tuning
|
||||
|
||||
#### Supervised Fine-Tuning with 4/8-bit Bitsandbytes/HQQ/EETQ Quantization (Recommended)
|
||||
@ -107,6 +126,12 @@ FORCE_TORCHRUN=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ds3.
|
||||
llamafactory-cli train examples/train_qlora/llama3_lora_sft_otfq.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning with 4-bit Bitsandbytes Quantization on Ascend NPU
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_qlora/llama3_lora_sft_bnb_npu.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning with 4/8-bit GPTQ Quantization
|
||||
|
||||
```bash
|
||||
@ -130,26 +155,28 @@ llamafactory-cli train examples/train_qlora/llama3_lora_sft_aqlm.yaml
|
||||
#### Supervised Fine-Tuning on Single Node
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft_ds3.yaml
|
||||
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Supervised Fine-Tuning on Multiple Nodes
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 NNODES=2 RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft_ds3.yaml
|
||||
FORCE_TORCHRUN=1 NNODES=2 RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft_ds3.yaml
|
||||
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Elastic and Fault-Tolerant Supervised Fine-Tuning on Multiple Nodes
|
||||
|
||||
To launch an elastic job that tolerates up to `MAX_RESTARTS` restarts on failure, run the following on at least `MIN_NNODES` and at most `MAX_NNODES` nodes. `RDZV_ID` should be set to a unique job ID shared by all nodes participating in the job. See also [torchrun](https://docs.pytorch.org/docs/stable/elastic/run.html).
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 MIN_NNODES=1 MAX_NNODES=3 MAX_RESTARTS=3 RDZV_ID=llamafactory MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Multimodal Supervised Fine-Tuning
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/qwen2vl_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Batch Predicting and Computing BLEU and ROUGE Scores
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_full/llama3_full_predict.yaml
|
||||
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/qwen2_5vl_full_sft.yaml
|
||||
```
|
||||
|
||||
### Merging LoRA Adapters and Quantization
|
||||
@ -168,15 +195,28 @@ llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
|
||||
llamafactory-cli export examples/merge_lora/llama3_gptq.yaml
|
||||
```
|
||||
|
||||
### Save Ollama modelfile
|
||||
|
||||
```bash
|
||||
llamafactory-cli export examples/merge_lora/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
### Inferring LoRA Fine-Tuned Models
|
||||
|
||||
#### Use CLI
|
||||
#### Evaluation using vLLM's Multi-GPU Inference
|
||||
|
||||
```bash
|
||||
python scripts/vllm_infer.py --model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct --template llama3 --dataset alpaca_en_demo
|
||||
python scripts/eval_bleu_rouge.py generated_predictions.jsonl
|
||||
```
|
||||
|
||||
#### Use CLI ChatBox
|
||||
|
||||
```bash
|
||||
llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### Use Web UI
|
||||
#### Use Web UI ChatBox
|
||||
|
||||
```bash
|
||||
llamafactory-cli webchat examples/inference/llama3_lora_sft.yaml
|
||||
@ -196,6 +236,12 @@ llamafactory-cli api examples/inference/llama3_lora_sft.yaml
|
||||
llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Full-Parameter Fine-Tuning using APOLLO
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/apollo/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Full-Parameter Fine-Tuning using BAdam
|
||||
|
||||
```bash
|
||||
@ -208,6 +254,12 @@ llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
|
||||
llamafactory-cli train examples/extras/adam_mini/qwen2_full_sft.yaml
|
||||
```
|
||||
|
||||
#### Full-Parameter Fine-Tuning using Muon
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/muon/qwen2_full_sft.yaml
|
||||
```
|
||||
|
||||
#### LoRA+ Fine-Tuning
|
||||
|
||||
```bash
|
||||
|
@ -13,6 +13,26 @@
|
||||
|
||||
使用 `CUDA_VISIBLE_DEVICES`(GPU)或 `ASCEND_RT_VISIBLE_DEVICES`(NPU)选择计算设备。
|
||||
|
||||
LLaMA-Factory 默认使用所有可见的计算设备。
|
||||
|
||||
基础用法:
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
高级用法:
|
||||
|
||||
```bash
|
||||
CUDA_VISIBLE_DEVICES=0,1 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml \
|
||||
learning_rate=1e-5 \
|
||||
logging_steps=1
|
||||
```
|
||||
|
||||
```bash
|
||||
bash examples/train_lora/llama3_lora_sft.sh
|
||||
```
|
||||
|
||||
## 示例
|
||||
|
||||
### LoRA 微调
|
||||
@ -32,8 +52,7 @@ llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
#### 多模态指令监督微调
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llava1_5_lora_sft.yaml
|
||||
llamafactory-cli train examples/train_lora/qwen2vl_lora_sft.yaml
|
||||
llamafactory-cli train examples/train_lora/qwen2_5vl_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### DPO/ORPO/SimPO 训练
|
||||
@ -45,7 +64,7 @@ llamafactory-cli train examples/train_lora/llama3_lora_dpo.yaml
|
||||
#### 多模态 DPO/ORPO/SimPO 训练
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/qwen2vl_lora_dpo.yaml
|
||||
llamafactory-cli train examples/train_lora/qwen2_5vl_lora_dpo.yaml
|
||||
```
|
||||
|
||||
#### 奖励模型训练
|
||||
@ -80,17 +99,19 @@ llamafactory-cli train examples/train_lora/llama3_preprocess.yaml
|
||||
llamafactory-cli eval examples/train_lora/llama3_lora_eval.yaml
|
||||
```
|
||||
|
||||
#### 批量预测并计算 BLEU 和 ROUGE 分数
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_lora/llama3_lora_predict.yaml
|
||||
```
|
||||
|
||||
#### 多机指令监督微调
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 NNODES=2 RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
FORCE_TORCHRUN=1 NNODES=2 RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### 支持弹性和容错的多机指令监督微调
|
||||
|
||||
要启动一个支持弹性节点和容错的多机指令微调,在每个节点上执行以下命令。弹性节点数量范围为 `MIN_NNODES:MAX_NNODES`,作业最多允许因错误重启 `MAX_RESTARTS` 次。`RDZV_ID` 应设置为一个唯一的作业 ID(由参与该作业的所有节点共享)。更多信息可参考官方文档 [torchrun](https://docs.pytorch.org/docs/stable/elastic/run.html)。
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 MIN_NNODES=1 MAX_NNODES=3 MAX_RESTARTS=3 RDZV_ID=llamafactory MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### 使用 DeepSpeed ZeRO-3 平均分配显存
|
||||
@ -99,6 +120,12 @@ FORCE_TORCHRUN=1 NNODES=2 RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llama
|
||||
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ds3.yaml
|
||||
```
|
||||
|
||||
#### 使用 Ray 在 4 张 GPU 上微调
|
||||
|
||||
```bash
|
||||
USE_RAY=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ray.yaml
|
||||
```
|
||||
|
||||
### QLoRA 微调
|
||||
|
||||
#### 基于 4/8 比特 Bitsandbytes/HQQ/EETQ 量化进行指令监督微调(推荐)
|
||||
@ -107,6 +134,12 @@ FORCE_TORCHRUN=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ds3.
|
||||
llamafactory-cli train examples/train_qlora/llama3_lora_sft_otfq.yaml
|
||||
```
|
||||
|
||||
#### 在 NPU 上基于 4 比特 Bitsandbytes 量化进行指令监督微调
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_qlora/llama3_lora_sft_bnb_npu.yaml
|
||||
```
|
||||
|
||||
#### 基于 4/8 比特 GPTQ 量化进行指令监督微调
|
||||
|
||||
```bash
|
||||
@ -130,26 +163,20 @@ llamafactory-cli train examples/train_qlora/llama3_lora_sft_aqlm.yaml
|
||||
#### 在单机上进行指令监督微调
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft_ds3.yaml
|
||||
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### 在多机上进行指令监督微调
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 NNODES=2 RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft_ds3.yaml
|
||||
FORCE_TORCHRUN=1 NNODES=2 RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft_ds3.yaml
|
||||
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=0 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||
FORCE_TORCHRUN=1 NNODES=2 NODE_RANK=1 MASTER_ADDR=192.168.0.1 MASTER_PORT=29500 llamafactory-cli train examples/train_full/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### 多模态指令监督微调
|
||||
|
||||
```bash
|
||||
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/qwen2vl_full_sft.yaml
|
||||
```
|
||||
|
||||
#### 批量预测并计算 BLEU 和 ROUGE 分数
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/train_full/llama3_full_predict.yaml
|
||||
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/qwen2_5vl_full_sft.yaml
|
||||
```
|
||||
|
||||
### 合并 LoRA 适配器与模型量化
|
||||
@ -168,15 +195,28 @@ llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
|
||||
llamafactory-cli export examples/merge_lora/llama3_gptq.yaml
|
||||
```
|
||||
|
||||
### 保存 Ollama 配置文件
|
||||
|
||||
```bash
|
||||
llamafactory-cli export examples/merge_lora/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
### 推理 LoRA 模型
|
||||
|
||||
#### 使用命令行接口
|
||||
#### 使用 vLLM 多卡推理评估
|
||||
|
||||
```bash
|
||||
python scripts/vllm_infer.py --model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct --template llama3 --dataset alpaca_en_demo
|
||||
python scripts/eval_bleu_rouge.py generated_predictions.jsonl
|
||||
```
|
||||
|
||||
#### 使用命令行对话框
|
||||
|
||||
```bash
|
||||
llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
|
||||
```
|
||||
|
||||
#### 使用浏览器界面
|
||||
#### 使用浏览器对话框
|
||||
|
||||
```bash
|
||||
llamafactory-cli webchat examples/inference/llama3_lora_sft.yaml
|
||||
@ -196,6 +236,12 @@ llamafactory-cli api examples/inference/llama3_lora_sft.yaml
|
||||
llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### 使用 APOLLO 进行全参数训练
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/apollo/llama3_full_sft.yaml
|
||||
```
|
||||
|
||||
#### 使用 BAdam 进行全参数训练
|
||||
|
||||
```bash
|
||||
@ -208,6 +254,12 @@ llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
|
||||
llamafactory-cli train examples/extras/adam_mini/qwen2_full_sft.yaml
|
||||
```
|
||||
|
||||
#### 使用 Muon 进行全参数训练
|
||||
|
||||
```bash
|
||||
llamafactory-cli train examples/extras/muon/qwen2_full_sft.yaml
|
||||
```
|
||||
|
||||
#### LoRA+ 微调
|
||||
|
||||
```bash
|
||||
|
@ -7,16 +7,16 @@ fsdp_config:
|
||||
fsdp_backward_prefetch: BACKWARD_PRE
|
||||
fsdp_forward_prefetch: false
|
||||
fsdp_cpu_ram_efficient_loading: true
|
||||
fsdp_offload_params: true # offload may affect training speed
|
||||
fsdp_offload_params: false
|
||||
fsdp_sharding_strategy: FULL_SHARD
|
||||
fsdp_state_dict_type: FULL_STATE_DICT
|
||||
fsdp_sync_module_states: true
|
||||
fsdp_use_orig_params: true
|
||||
machine_rank: 0
|
||||
main_training_function: main
|
||||
mixed_precision: fp16 # or bf16
|
||||
num_machines: 1 # the number of nodes
|
||||
num_processes: 2 # the number of GPUs in all nodes
|
||||
mixed_precision: bf16 # or fp16
|
||||
num_machines: 1 # the number of nodes
|
||||
num_processes: 2 # the number of GPUs in all nodes
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
tpu_env: []
|
||||
|
25
examples/accelerate/fsdp_config_offload.yaml
Normal file
@ -0,0 +1,25 @@
|
||||
compute_environment: LOCAL_MACHINE
|
||||
debug: false
|
||||
distributed_type: FSDP
|
||||
downcast_bf16: 'no'
|
||||
fsdp_config:
|
||||
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
|
||||
fsdp_backward_prefetch: BACKWARD_PRE
|
||||
fsdp_forward_prefetch: false
|
||||
fsdp_cpu_ram_efficient_loading: true
|
||||
fsdp_offload_params: true # offload may affect training speed
|
||||
fsdp_sharding_strategy: FULL_SHARD
|
||||
fsdp_state_dict_type: FULL_STATE_DICT
|
||||
fsdp_sync_module_states: true
|
||||
fsdp_use_orig_params: true
|
||||
machine_rank: 0
|
||||
main_training_function: main
|
||||
mixed_precision: bf16 # or fp16
|
||||
num_machines: 1 # the number of nodes
|
||||
num_processes: 2 # the number of GPUs in all nodes
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
tpu_env: []
|
||||
tpu_use_cluster: false
|
||||
tpu_use_sudo: false
|
||||
use_cpu: false
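Both FSDP files are consumed by `accelerate`; a hedged launch sketch pairing the new offload config with the repository's full-SFT example (the exact pairing is illustrative):

```bash
accelerate launch --config_file examples/accelerate/fsdp_config_offload.yaml \
    src/train.py examples/train_full/llama3_full_sft.yaml
```

The offload variant keeps `fsdp_offload_params: true`, which pages parameters to CPU; this saves GPU memory at the cost of speed, which is presumably why the default config now sets it to `false`.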
|
@ -19,10 +19,10 @@
|
||||
"stage": 0,
|
||||
"allgather_partitions": true,
|
||||
"allgather_bucket_size": 5e8,
|
||||
"overlap_comm": true,
|
||||
"overlap_comm": false,
|
||||
"reduce_scatter": true,
|
||||
"reduce_bucket_size": 5e8,
|
||||
"contiguous_gradients": true,
|
||||
"round_robin_gradients": true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -19,10 +19,10 @@
|
||||
"stage": 2,
|
||||
"allgather_partitions": true,
|
||||
"allgather_bucket_size": 5e8,
|
||||
"overlap_comm": true,
|
||||
"overlap_comm": false,
|
||||
"reduce_scatter": true,
|
||||
"reduce_bucket_size": 5e8,
|
||||
"contiguous_gradients": true,
|
||||
"round_robin_gradients": true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -23,10 +23,10 @@
|
||||
},
|
||||
"allgather_partitions": true,
|
||||
"allgather_bucket_size": 5e8,
|
||||
"overlap_comm": true,
|
||||
"overlap_comm": false,
|
||||
"reduce_scatter": true,
|
||||
"reduce_bucket_size": 5e8,
|
||||
"contiguous_gradients": true,
|
||||
"round_robin_gradients": true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -17,7 +17,7 @@
|
||||
},
|
||||
"zero_optimization": {
|
||||
"stage": 3,
|
||||
"overlap_comm": true,
|
||||
"overlap_comm": false,
|
||||
"contiguous_gradients": true,
|
||||
"sub_group_size": 1e9,
|
||||
"reduce_bucket_size": "auto",
|
||||
@ -27,4 +27,4 @@
|
||||
"stage3_max_reuse_distance": 1e9,
|
||||
"stage3_gather_16bit_weights_on_model_save": true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -25,7 +25,7 @@
|
||||
"device": "cpu",
|
||||
"pin_memory": true
|
||||
},
|
||||
"overlap_comm": true,
|
||||
"overlap_comm": false,
|
||||
"contiguous_gradients": true,
|
||||
"sub_group_size": 1e9,
|
||||
"reduce_bucket_size": "auto",
|
||||
@ -35,4 +35,4 @@
|
||||
"stage3_max_reuse_distance": 1e9,
|
||||
"stage3_gather_16bit_weights_on_model_save": true
|
||||
}
|
||||
}
|
||||
}
|
||||
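All five ZeRO configs flip `overlap_comm` from `true` to `false`; overlapped communication keeps extra buffers alive, so disabling it likely trades some throughput for lower peak memory. A config is selected per run through the `deepspeed` key of a training YAML, e.g. (command as in the README above):

```bash
FORCE_TORCHRUN=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ds3.yaml
```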
|
@ -1,5 +1,6 @@
|
||||
### model
|
||||
model_name_or_path: Qwen/Qwen2-1.5B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
@ -10,10 +11,11 @@ use_adam_mini: true
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: qwen
|
||||
cutoff_len: 1024
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/qwen2-1_5b/full/sft
|
||||
@ -21,6 +23,8 @@ logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
@ -33,7 +37,7 @@ bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
val_size: 0.1
|
||||
per_device_eval_batch_size: 1
|
||||
eval_strategy: steps
|
||||
eval_steps: 500
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
|
48
examples/extras/apollo/llama3_full_sft.yaml
Normal file
@ -0,0 +1,48 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: full
|
||||
use_apollo: true
|
||||
apollo_layerwise: true # choices: [true, false], use false for DDP training
|
||||
apollo_target: all
|
||||
apollo_rank: 128
|
||||
apollo_scale: 32.0
|
||||
apollo_scale_type: channel
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/full/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 1 # use 1 for layerwise apollo
|
||||
learning_rate: 1.0e-5
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
pure_bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
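The new APOLLO recipe is launched like the other extras, matching the command added to the README above:

```bash
llamafactory-cli train examples/extras/apollo/llama3_full_sft.yaml
```

Per the in-file comments, layerwise APOLLO requires `gradient_accumulation_steps: 1`; set `apollo_layerwise: false` when training with DDP.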
|
@ -1,5 +1,6 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
@ -15,10 +16,11 @@ badam_verbose: 2
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 1024
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/full/sft
|
||||
@ -26,6 +28,8 @@ logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
@ -36,7 +40,7 @@ lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
|
||||
### eval
|
||||
val_size: 0.1
|
||||
per_device_eval_batch_size: 1
|
||||
eval_strategy: steps
|
||||
eval_steps: 500
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
|
@ -1,20 +1,23 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
quantization_bit: 4
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 1024
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/sft
|
||||
@ -22,6 +25,8 @@ logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
@ -34,7 +39,7 @@ bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
val_size: 0.1
|
||||
per_device_eval_batch_size: 1
|
||||
eval_strategy: steps
|
||||
eval_steps: 500
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
|
@ -1,23 +1,25 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: full
|
||||
use_galore: true
|
||||
galore_layerwise: true
|
||||
galore_target: mlp,self_attn
|
||||
galore_layerwise: true # choices: [true, false], use false for DDP training
|
||||
galore_target: all
|
||||
galore_rank: 128
|
||||
galore_scale: 2.0
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 1024
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/full/sft
|
||||
@ -25,10 +27,12 @@ logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 1
|
||||
gradient_accumulation_steps: 1 # use 1 for layerwise galore
|
||||
learning_rate: 1.0e-5
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
@ -37,7 +41,7 @@ pure_bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
val_size: 0.1
|
||||
per_device_eval_batch_size: 1
|
||||
eval_strategy: steps
|
||||
eval_steps: 500
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
|
@ -1,5 +1,6 @@
|
||||
### model
|
||||
model_name_or_path: models/llama3-8b-pro
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
@ -12,10 +13,11 @@ use_llama_pro: true
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 1024
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b-pro/freeze/sft
|
||||
@ -23,6 +25,8 @@ logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
@ -35,7 +39,7 @@ bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
val_size: 0.1
|
||||
per_device_eval_batch_size: 1
|
||||
eval_strategy: steps
|
||||
eval_steps: 500
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
|
@ -1,20 +1,23 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
loraplus_lr_ratio: 16.0
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 1024
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/sft
|
||||
@ -22,6 +25,8 @@ logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
@ -34,7 +39,7 @@ bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
val_size: 0.1
|
||||
per_device_eval_batch_size: 1
|
||||
eval_strategy: steps
|
||||
eval_steps: 500
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
|
@ -1,5 +1,6 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
@ -10,10 +11,11 @@ mixture_of_depths: convert
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 1024
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b-mod/full/sft
|
||||
@ -21,6 +23,8 @@ logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
@ -34,7 +38,7 @@ pure_bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
val_size: 0.1
|
||||
per_device_eval_batch_size: 1
|
||||
eval_strategy: steps
|
||||
eval_steps: 500
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
|
43
examples/extras/muon/qwen2_full_sft.yaml
Normal file
@ -0,0 +1,43 @@
|
||||
### model
|
||||
model_name_or_path: Qwen/Qwen2-1.5B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: full
|
||||
use_muon: true
|
||||
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: qwen
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/qwen2-1_5b/full/sft
|
||||
logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
gradient_accumulation_steps: 8
|
||||
learning_rate: 1.0e-5
|
||||
num_train_epochs: 3.0
|
||||
lr_scheduler_type: cosine
|
||||
warmup_ratio: 0.1
|
||||
bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
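As with the other optimizer extras, the Muon recipe is launched with the command added to the README above:

```bash
llamafactory-cli train examples/extras/muon/qwen2_full_sft.yaml
```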
|
@ -1,6 +1,10 @@
|
||||
# The batch generation can be SLOW using this config.
|
||||
# For faster inference, we recommend using `scripts/vllm_infer.py`.
|
||||
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
adapter_name_or_path: saves/llama3-8b/lora/sft
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
@ -10,14 +14,16 @@ finetuning_type: lora
|
||||
### dataset
|
||||
eval_dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 1024
|
||||
cutoff_len: 2048
|
||||
max_samples: 50
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/predict
|
||||
overwrite_output_dir: true
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### eval
|
||||
per_device_eval_batch_size: 1
|
@ -1,10 +1,12 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
trust_remote_code: true
|
||||
|
||||
### method
|
||||
stage: sft
|
||||
do_train: true
|
||||
finetuning_type: lora
|
||||
lora_rank: 8
|
||||
lora_target: all
|
||||
pissa_init: true
|
||||
pissa_iter: 16
|
||||
@ -13,10 +15,11 @@ pissa_convert: true
|
||||
### dataset
|
||||
dataset: identity,alpaca_en_demo
|
||||
template: llama3
|
||||
cutoff_len: 1024
|
||||
cutoff_len: 2048
|
||||
max_samples: 1000
|
||||
overwrite_cache: true
|
||||
preprocessing_num_workers: 16
|
||||
dataloader_num_workers: 4
|
||||
|
||||
### output
|
||||
output_dir: saves/llama3-8b/lora/sft
|
||||
@ -24,6 +27,8 @@ logging_steps: 10
|
||||
save_steps: 500
|
||||
plot_loss: true
|
||||
overwrite_output_dir: true
|
||||
save_only_model: false
|
||||
report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
|
||||
|
||||
### train
|
||||
per_device_train_batch_size: 1
|
||||
@ -36,7 +41,7 @@ bf16: true
|
||||
ddp_timeout: 180000000
|
||||
|
||||
### eval
|
||||
val_size: 0.1
|
||||
per_device_eval_batch_size: 1
|
||||
eval_strategy: steps
|
||||
eval_steps: 500
|
||||
# val_size: 0.1
|
||||
# per_device_eval_batch_size: 1
|
||||
# eval_strategy: steps
|
||||
# eval_steps: 500
|
||||
|
@ -1,2 +1,4 @@
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
template: llama3
|
||||
infer_backend: huggingface # choices: [huggingface, vllm, sglang]
|
||||
trust_remote_code: true
|
||||
|
4
examples/inference/llama3_full_sft.yaml
Normal file
@ -0,0 +1,4 @@
|
||||
model_name_or_path: saves/llama3-8b/full/sft
|
||||
template: llama3
|
||||
infer_backend: huggingface # choices: [huggingface, vllm, sglang]
|
||||
trust_remote_code: true
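A plausible use of this new config, mirroring the LoRA inference examples above (the subcommand is interchangeable with `webchat` or `api`):

```bash
llamafactory-cli chat examples/inference/llama3_full_sft.yaml
```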
|
@ -1,4 +1,5 @@
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
adapter_name_or_path: saves/llama3-8b/lora/sft
|
||||
template: llama3
|
||||
finetuning_type: lora
|
||||
infer_backend: huggingface # choices: [huggingface, vllm, sglang]
|
||||
trust_remote_code: true
|
||||
|
@ -1,4 +0,0 @@
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
template: llama3
|
||||
infer_backend: vllm
|
||||
vllm_enforce_eager: true
|
@ -1,2 +0,0 @@
|
||||
model_name_or_path: llava-hf/llava-1.5-7b-hf
|
||||
template: llava
|
4
examples/inference/qwen2_5vl.yaml
Normal file
@ -0,0 +1,4 @@
|
||||
model_name_or_path: Qwen/Qwen2.5-VL-7B-Instruct
|
||||
template: qwen2_vl
|
||||
infer_backend: huggingface # choices: [huggingface, vllm, sglang]
|
||||
trust_remote_code: true
|
@ -1,2 +0,0 @@
|
||||
model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
|
||||
template: qwen2_vl
|
10
examples/merge_lora/llama3_full_sft.yaml
Normal file
@ -0,0 +1,10 @@
|
||||
### model
|
||||
model_name_or_path: saves/llama3-8b/full/sft
|
||||
template: llama3
|
||||
trust_remote_code: true
|
||||
|
||||
### export
|
||||
export_dir: output/llama3_full_sft
|
||||
export_size: 5
|
||||
export_device: cpu # choices: [cpu, auto]
|
||||
export_legacy_format: false
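This file backs the new "Save Ollama modelfile" entry in the README above:

```bash
llamafactory-cli export examples/merge_lora/llama3_full_sft.yaml
```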
|
@ -1,11 +1,12 @@
|
||||
### model
|
||||
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
|
||||
template: llama3
|
||||
trust_remote_code: true
|
||||
|
||||
### export
|
||||
export_dir: models/llama3_gptq
|
||||
export_dir: output/llama3_gptq
|
||||
export_quantization_bit: 4
|
||||
export_quantization_dataset: data/c4_demo.json
|
||||
export_size: 2
|
||||
export_device: cpu
|
||||
export_quantization_dataset: data/c4_demo.jsonl
|
||||
export_size: 5
|
||||
export_device: cpu # choices: [cpu, auto]
|
||||
export_legacy_format: false
|
||||
|
@@ -4,10 +4,10 @@
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 adapter_name_or_path: saves/llama3-8b/lora/sft
 template: llama3
-finetuning_type: lora
+trust_remote_code: true
 
 ### export
-export_dir: models/llama3_lora_sft
-export_size: 2
-export_device: cpu
+export_dir: output/llama3_lora_sft
+export_size: 5
+export_device: cpu # choices: [cpu, auto]
 export_legacy_format: false
examples/merge_lora/qwen2_5vl_lora_sft.yaml (new file, +13)
@@ -0,0 +1,13 @@
+### Note: DO NOT use quantized model or quantization_bit when merging lora adapters
+
+### model
+model_name_or_path: Qwen/Qwen2.5-VL-7B-Instruct
+adapter_name_or_path: saves/qwen2_5vl-7b/lora/sft
+template: qwen2_vl
+trust_remote_code: true
+
+### export
+export_dir: output/qwen2_5vl_lora_sft
+export_size: 5
+export_device: cpu # choices: [cpu, auto]
+export_legacy_format: false
@@ -1,13 +0,0 @@
-### Note: DO NOT use quantized model or quantization_bit when merging lora adapters
-
-### model
-model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
-adapter_name_or_path: saves/qwen2_vl-7b/lora/sft
-template: qwen2_vl
-finetuning_type: lora
-
-### export
-export_dir: models/qwen2_vl_lora_sft
-export_size: 2
-export_device: cpu
-export_legacy_format: false
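All of the merge_lora recipes now write to output/ instead of models/, raise export_size from 2 to 5 (the maximum shard size, in GB, of the exported checkpoint), and annotate export_device with its choices. Because the note at the top of each file forbids merging into a quantized model, quantization has to be a second pass over the already-merged weights. A sketch of that two-stage flow, using only keys present in the diff; the pass-2 model path is a hypothetical reuse of the pass-1 output:

    # pass 1: merge the adapter into full-precision weights
    model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
    adapter_name_or_path: saves/llama3-8b/lora/sft
    template: llama3
    export_dir: output/llama3_lora_sft

    # pass 2 (separate config): GPTQ-quantize the merged checkpoint
    # model_name_or_path: output/llama3_lora_sft   # hypothetical: points at the pass-1 output
    # export_quantization_bit: 4
    # export_quantization_dataset: data/c4_demo.jsonl
    # export_dir: output/llama3_gptq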
@@ -1,23 +0,0 @@
-### model
-model_name_or_path: saves/llama3-8b/full/sft
-
-### method
-stage: sft
-do_predict: true
-finetuning_type: full
-
-### dataset
-eval_dataset: identity,alpaca_en_demo
-template: llama3
-cutoff_len: 1024
-max_samples: 50
-overwrite_cache: true
-preprocessing_num_workers: 16
-
-### output
-output_dir: saves/llama3-8b/full/predict
-overwrite_output_dir: true
-
-### eval
-per_device_eval_batch_size: 1
-predict_with_generate: true
@@ -1,19 +1,21 @@
 ### model
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
+trust_remote_code: true
 
 ### method
 stage: sft
 do_train: true
 finetuning_type: full
-deepspeed: examples/deepspeed/ds_z3_config.json
+deepspeed: examples/deepspeed/ds_z3_config.json # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json]
 
 ### dataset
 dataset: identity,alpaca_en_demo
 template: llama3
-cutoff_len: 1024
+cutoff_len: 2048
 max_samples: 1000
 overwrite_cache: true
 preprocessing_num_workers: 16
+dataloader_num_workers: 4
 
 ### output
 output_dir: saves/llama3-8b/full/sft
@@ -21,6 +23,8 @@ logging_steps: 10
 save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
+save_only_model: false
+report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
 
 ### train
 per_device_train_batch_size: 1
@@ -31,9 +35,11 @@ lr_scheduler_type: cosine
 warmup_ratio: 0.1
 bf16: true
 ddp_timeout: 180000000
+resume_from_checkpoint: null
 
 ### eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-eval_strategy: steps
-eval_steps: 500
+# eval_dataset: alpaca_en_demo
+# val_size: 0.1
+# per_device_eval_batch_size: 1
+# eval_strategy: steps
+# eval_steps: 500
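Two additions in the full-SFT recipe are easy to miss: the deepspeed line now lists the available ZeRO-stage configs inline, and resume_from_checkpoint: null is surfaced so the resume knob is discoverable. A sketch of how one might use them; the checkpoint path below is hypothetical and depends on save_steps:

    deepspeed: examples/deepspeed/ds_z2_config.json   # pick ZeRO-2 from the listed choices
    resume_from_checkpoint: saves/llama3-8b/full/sft/checkpoint-500   # hypothetical step-500 checkpoint; null starts fresh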
examples/train_full/qwen2_5vl_full_sft.yaml (new file, +49)
@@ -0,0 +1,49 @@
+### model
+model_name_or_path: Qwen/Qwen2.5-VL-7B-Instruct
+image_max_pixels: 262144
+video_max_pixels: 16384
+trust_remote_code: true
+
+### method
+stage: sft
+do_train: true
+finetuning_type: full
+freeze_vision_tower: true
+freeze_multi_modal_projector: true
+freeze_language_model: false
+deepspeed: examples/deepspeed/ds_z3_config.json
+
+### dataset
+dataset: mllm_demo,identity,alpaca_en_demo
+template: qwen2_vl
+cutoff_len: 2048
+max_samples: 1000
+overwrite_cache: true
+preprocessing_num_workers: 16
+dataloader_num_workers: 4
+
+### output
+output_dir: saves/qwen2_5vl-7b/full/sft
+logging_steps: 10
+save_steps: 500
+plot_loss: true
+overwrite_output_dir: true
+save_only_model: false
+report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
+
+### train
+per_device_train_batch_size: 1
+gradient_accumulation_steps: 2
+learning_rate: 1.0e-5
+num_train_epochs: 3.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.1
+bf16: true
+ddp_timeout: 180000000
+resume_from_checkpoint: null
+
+### eval
+# val_size: 0.1
+# per_device_eval_batch_size: 1
+# eval_strategy: steps
+# eval_steps: 500
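The new Qwen2.5-VL recipe is the first full-parameter multimodal example in this set: image_max_pixels and video_max_pixels bound the visual token budget, and the three freeze_* switches pin the vision tower and projector while leaving the language model trainable. An illustrative variant (not part of this PR) that also adapts the projector flips a single flag:

    freeze_vision_tower: true            # keep the ViT encoder frozen
    freeze_multi_modal_projector: false  # let the projector adapt to the new data
    freeze_language_model: false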
@@ -1,39 +0,0 @@
-### model
-model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
-
-### method
-stage: sft
-do_train: true
-finetuning_type: full
-deepspeed: examples/deepspeed/ds_z3_config.json
-
-### dataset
-dataset: mllm_demo,identity
-template: qwen2_vl
-cutoff_len: 1024
-max_samples: 1000
-overwrite_cache: true
-preprocessing_num_workers: 16
-
-### output
-output_dir: saves/qwen2_vl-7b/full/sft
-logging_steps: 10
-save_steps: 500
-plot_loss: true
-overwrite_output_dir: true
-
-### train
-per_device_train_batch_size: 1
-gradient_accumulation_steps: 2
-learning_rate: 1.0e-5
-num_train_epochs: 3.0
-lr_scheduler_type: cosine
-warmup_ratio: 0.1
-bf16: true
-ddp_timeout: 180000000
-
-### eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-eval_strategy: steps
-eval_steps: 500
@@ -1,10 +1,12 @@
 ### model
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
+trust_remote_code: true
 
 ### method
 stage: dpo
 do_train: true
 finetuning_type: lora
+lora_rank: 8
 lora_target: all
 pref_beta: 0.1
 pref_loss: sigmoid # choices: [sigmoid (dpo), orpo, simpo]
@@ -12,10 +14,11 @@ pref_loss: sigmoid # choices: [sigmoid (dpo), orpo, simpo]
 ### dataset
 dataset: dpo_en_demo
 template: llama3
-cutoff_len: 1024
+cutoff_len: 2048
 max_samples: 1000
 overwrite_cache: true
 preprocessing_num_workers: 16
+dataloader_num_workers: 4
 
 ### output
 output_dir: saves/llama3-8b/lora/dpo
@@ -23,6 +26,8 @@ logging_steps: 10
 save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
+save_only_model: false
+report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
 
 ### train
 per_device_train_batch_size: 1
@@ -33,9 +38,11 @@ lr_scheduler_type: cosine
 warmup_ratio: 0.1
 bf16: true
 ddp_timeout: 180000000
+resume_from_checkpoint: null
 
 ### eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-eval_strategy: steps
-eval_steps: 500
+# eval_dataset: dpo_en_demo
+# val_size: 0.1
+# per_device_eval_batch_size: 1
+# eval_strategy: steps
+# eval_steps: 500
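In the DPO recipe, pref_beta scales how strongly the policy is penalized for drifting from the reference model, and pref_loss picks the preference objective itself, with the alternatives listed in the inline comment. A minimal sketch of switching to the reference-free SimPO objective, changing only keys that appear in the diff:

    stage: dpo
    pref_beta: 0.1
    pref_loss: simpo   # one of the choices the comment lists alongside sigmoid (dpo) and orpo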
@@ -1,6 +1,7 @@
 ### model
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
 adapter_name_or_path: saves/llama3-8b/lora/sft
+trust_remote_code: true
 
 ### method
 finetuning_type: lora
@@ -1,20 +1,23 @@
 ### model
 model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
+trust_remote_code: true
 
 ### method
 stage: kto
 do_train: true
 finetuning_type: lora
+lora_rank: 8
 lora_target: all
 pref_beta: 0.1
 
 ### dataset
 dataset: kto_en_demo
 template: llama3
-cutoff_len: 1024
+cutoff_len: 2048
 max_samples: 1000
 overwrite_cache: true
 preprocessing_num_workers: 16
+dataloader_num_workers: 4
 
 ### output
 output_dir: saves/llama3-8b/lora/kto
@@ -22,6 +25,7 @@ logging_steps: 10
 save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
+report_to: none # choices: [none, wandb, tensorboard, swanlab, mlflow]
 
 ### train
 per_device_train_batch_size: 1
@@ -34,7 +38,7 @@ bf16: true
 ddp_timeout: 180000000
 
 ### eval
-val_size: 0.1
-per_device_eval_batch_size: 1
-eval_strategy: steps
-eval_steps: 500
+# val_size: 0.1
+# per_device_eval_batch_size: 1
+# eval_strategy: steps
+# eval_steps: 500
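Unlike the DPO example, the KTO recipe trains on unpaired binary feedback (kto_en_demo) rather than chosen/rejected pairs, and its output hunk adds only report_to, not save_only_model. The changeset also pins lora_rank: 8 explicitly across the LoRA recipes; an illustrative capacity bump, varying only that key:

    finetuning_type: lora
    lora_rank: 16      # roughly doubles adapter parameters (and memory) over the pinned default of 8
    lora_target: all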