[index-tts] 推理了两遍，应该是 bug 吧？

2025-10-29 544 views
7

tensor([[10201, 10206, 10201, 1754, 10201, 10206, 10206, 10201, 2737, 10201, 1524, 10201, 10206, 10201, 42, 10201, 2683, 10201, 1832, 10201, 1985, 10201, 5656, 10203, 10201, 1046, 10201, 36, 10201, 1035, 10201, 2398, 10201, 555, 10201, 36, 10202, 10206, 10201, 1221, 10201, 1053, 10201, 42, 10201, 2737, 10201, 3912, 10202, 10201, 164, 10201, 3729, 10201, 42, 10201, 2737, 10201, 1397, 10201, 10206, 10201, 2474, 10201, 7, 10201, 34, 10201, 2605, 10201, 388, 10201, 1464, 10201, 755, 10201, 1988, 10201, 16, 10201, 832, 10201, 1414, 10201, 1877, 10201, 5913, 10201, 3880, 10201, 2916, 10201, 6605, 10203]], device='cuda:0', dtype=torch.int32) text_tokens shape: torch.Size([1, 94]), text_tokens type: torch.int32 text_token_syms is same as sentence tokens True Use the specified emotion vector Passing a tuple of past_key_values is deprecated and will be removed in Transformers v4.53.0. You should pass an instance of Cache instead, e.g. past_key_values=DynamicCache.from_legacy_cache(past_key_values). 
tensor([[1521, 8065, 3708, 1014, 8075, 3659, 7414, 2105, 2398, 2435, 2955, 258, 2591, 1901, 7949, 7185, 2262, 2737, 4722, 3963, 6523, 3866, 7838, 5015, 3325, 5559, 7148, 1444, 2898, 6710, 6437, 7654, 4766, 4232, 2391, 518, 6226, 2902, 937, 4828, 1508, 6465, 2815, 4974, 1537, 1686, 1968, 1466, 5872, 2020, 2808, 660, 1035, 5864, 2672, 6602, 1339, 7287, 5899, 1754, 962, 7371, 6752, 8048, 5009, 7224, 300, 4541, 7923, 3445, 2356, 4477, 5411, 4261, 6994, 5475, 499, 1020, 3821, 5196, 7669, 1174, 1295, 2990, 3789, 6753, 6539, 4029, 6832, 3291, 6549, 7376, 763, 203, 6200, 5254, 240, 358, 226, 2592, 995, 197, 2583, 3962, 5408, 5501, 4913, 5464, 7123, 6889, 8180, 7128, 1371, 670, 7418, 7188, 3159, 6924, 2330, 4997, 3197, 3207, 4285, 5508, 835, 1362, 2911, 8007, 4377, 1120, 2139, 6798, 1073, 5534, 1156, 7130, 4390, 3390, 6113, 3106, 1434, 6636, 2835, 4110, 3580, 7036, 2371, 4393, 8153, 4942, 2067, 1587, 4485, 6547, 6468, 5821, 6306, 8191, 6199, 6077, 4334, 5863, 1330, 3819, 34, 1719, 6049, 2214, 2157, 1741, 5719, 832, 3236, 942, 393, 3153, 129, 5605, 2994, 3126, 1124, 372, 6028, 2629, 3380, 2918, 5391, 3502, 4216, 3733, 795, 7492, 4389, 3440, 2209, 3524, 1682, 6648, 8031, 26, 612, 4844, 1665, 5976, 7053, 427, 7519, 3883, 3029, 7062, 7334, 3355, 7604, 4016, 6907, 4253, 6343, 7510, 402, 830, 241, 6029, 735, 4723, 7661, 4264, 2022, 3145, 6424, 637, 5699, 3230, 2834, 6161, 1840, 329, 3532, 3433, 2756, 4034, 5156, 148, 4001, 398, 434, 5503, 93, 5261, 3435, 576, 6271, 5412, 5973, 13, 5742, 6598, 4562, 6169, 5352, 956, 265, 4255, 4432, 3601, 5772, 5793, 7768, 2598, 2719, 1467, 2038, 2030, 6283, 3538, 1675, 4939, 6401, 7887, 1803, 6299, 2872, 1802, 4481, 907, 1001, 6041, 2439, 7932, 2043, 6734, 7105, 1817, 1775, 4613, 6688, 7857, 2370, 3853, 1118, 6486, 7743, 777, 4143, 4084, 5586, 1214, 3680, 7924, 2494, 4940, 1893, 2394, 6667, 318, 3481, 7239, 4069, 369, 1742, 844, 270, 11, 7257, 2793, 534, 474, 3192, 376, 3637, 3239, 4024, 1064, 7821, 4120, 6778, 4087, 579, 2585, 6756, 5548, 4088, 
409, 2953, 939, 7478, 4157, 3844, 3648, 6852, 7942, 1774, 1887, 6585, 1417, 6653, 5199, 7763, 5637, 7325, 2335, 6182, 7875, 6792, 7783, 7000, 3206, 6438, 2174, 4369, 7015, 7091, 36, 6038, 7040, 5382, 6992, 4830, 7993, 7339, 5771, 6504, 909, 863, 1394, 945, 3155, 1688, 4759, 7158, 698, 7309, 4416, 2946, 1676, 5580, 6896, 7098, 1620, 828, 6262, 5044, 1348, 6625, 1190, 631, 7108, 31, 2354, 5582, 1472, 2525, 7443, 6905, 7885, 6990, 6025, 1588, 2257, 5912, 7750, 7915, 4656, 1826, 5014, 7240, 6492, 2716, 200, 5873, 7898, 4164, 1186, 2526, 7685, 5838, 375, 5164, 3118, 2283, 4094, 6996, 5066, 7802, 5791, 4937, 533, 6140, 7104, 341, 3762, 6181, 53, 6505, 8171, 3505, 4951, 5506, 7623, 5005, 2844, 2265, 4619, 2301, 3754, 1541, 3735, 4172, 3992, 2332, 1937, 5854, 5970, 3831, 6402, 5628, 6092, 5701, 596, 5961, 2609, 7393, 8149, 2118, 7080, 7421, 6485, 5294, 627, 2774, 6442, 2035, 1916, 5529, 6007, 2593, 6415, 603, 2838, 7217, 8070, 2343, 2880, 6721, 1305, 5808, 2817, 6947, 179, 6216, 3216, 1769, 5277, 3282, 2768, 1492, 3354, 511, 2318, 6070, 281, 6111, 2274, 2747, 6677, 5616, 5347, 8184, 3793, 4345, 3320, 4875, 147, 2481, 3393, 3575, 3721, 4074, 3047, 8154, 6207, 2399, 3986, 4433, 2879, 7405, 1734, 4976, 3498, 3982, 4471, 7020, 440, 4236, 2307, 4717, 5140, 6369, 2455, 4741, 1758, 4362, 2289, 6983, 6925, 3579, 3869, 6919, 7737, 306, 4527, 4598, 1471, 609, 5012, 5109, 8071, 2529, 3203, 4515, 1823, 3052, 7368, 6233, 931, 4351, 1793, 5448, 904, 3020, 2613, 5174, 4461, 1173, 848, 5905, 4512, 99, 1493, 366, 843, 1609, 2050, 3802, 5711, 1266, 2413, 7204, 7795, 50, 647, 3634, 14, 1792, 6241, 1515, 2122, 1982, 2381, 3152, 5445, 6545, 1398, 1655, 1941, 451, 7810, 6053, 5353, 953, 1809, 5202, 3649, 5169, 6395, 1438, 471, 1993, 5988, 7491, 2098, 7149, 3022, 1942, 1911, 1564, 62, 589, 5233, 6620, 3510, 1530, 4797, 8127, 7943, 8024, 3131, 2528, 4588, 6892, 2426, 2695, 6828, 4604, 6720, 1972, 2502, 101, 3958, 5423, 4015, 6145, 27, 6687, 4177, 4269, 3718, 3378, 545, 4821, 4007, 1902, 2889, 
278, 6191, 5644, 4267, 2825, 5960, 822, 952, 6414, 15, 5762, 2709, 4615, 7865, 7272, 5612, 4818, 4490, 7868, 7450, 6198, 3185, 3333, 1115, 3577, 5652, 7998, 4206, 1056, 557, 2565, 1415, 1105, 3139, 3782, 18, 3629, 2814, 3533, 7193, 6931, 6538, 2562, 7429, 1919, 2113, 6949, 7893, 8019, 3285, 6382, 6323, 6528, 6433, 339, 792, 6976, 2460, 4876, 5168, 5677, 3890, 2569, 4020, 6678, 4716, 7537, 7631, 3399, 7892, 5827, 1705, 8081, 3933, 1860, 5312, 1315, 2086, 2980, 794, 4452, 3681, 890, 6939, 4930, 1664, 6731, 6779, 6002, 7908, 3069, 6708, 1209, 970, 5512, 3407, 6099, 562, 3539, 5671, 5903, 132, 4891, 1164, 5600, 7754, 5796, 5848, 237, 6772, 5083, 4364, 6938, 6067, 2866, 4789, 575, 1081, 1278, 6313, 3365, 4249, 4706, 4297, 2539, 4149, 2735, 7545, 5691, 3092, 4395, 4903, 7772, 6023, 1746, 4654, 4838, 286]], device='cuda:0') <class 'torch.Tensor'> fix codes shape: torch.Size([1, 820]), codes type: torch.int64 code len: tensor([820], device='cuda:0') 100%|██████████████████████████████████████████████████████████████████████████████████| 25/25 [00:20<00:00, 1.21it/s] torch.Size([1, 360960]) wav shape: torch.Size([1, 360960]) min: tensor(-23411.9980, device='cuda:0') max: tensor(23456.9629, device='cuda:0') tensor([[10201, 1425, 10201, 5656, 10201, 1805, 10201, 3945, 10201, 755, 10201, 5929, 10201, 3880, 10201, 586, 10201, 1754, 10202, 10201, 5466, 10201, 468, 10201, 3880, 10201, 2435, 10201, 1949, 10201, 142, 10201, 638, 10201, 34, 10201, 197, 10201, 1046, 10201, 1433, 10201, 1220, 10201, 2686, 10201, 1524, 10201, 36, 10201, 3880, 10201, 2193, 10201, 2072, 10203]], device='cuda:0', dtype=torch.int32) text_tokens shape: torch.Size([1, 56]), text_tokens type: torch.int32 text_token_syms is same as sentence tokens True Use the specified emotion vector tensor([[8065, 2817, 5863, 4903, 5726, 4923, 5582, 5007, 5145, 1910, 5534, 2058, 3938, 3086, 4390, 265, 5196, 3246, 329, 3821, 5140, 2531, 20, 6463, 2990, 2807, 4971, 3919, 6957, 7161, 7887, 2356, 1116, 7943, 2663, 6401, 4362, 
2067, 6893, 1705, 1489, 7666, 5711, 5408, 6710, 1073, 5177, 369, 2223, 3271, 701, 7592, 5599, 7998, 7551, 5353, 1982, 6602, 1392, 358, 5993, 5781, 7160, 3022, 3808, 6990, 3712, 573, 3433, 6053, 4406, 2756, 7371, 2394, 371, 1911, 5872, 7020, 1902, 7993, 8171, 6684, 2208, 1461, 3657, 6245, 5793, 1655, 1887, 5882, 6976, 1118, 4766, 253, 6911, 5561, 6233, 7240, 3219, 8014, 7913, 7464, 6913, 2786, 1972, 7478, 5568, 6054, 3792, 6838, 5233, 6782, 6083, 5791, 7631, 8071, 5228, 8081, 7038, 4083, 2371, 3793, 2248, 5637, 2143, 4237, 499, 7421, 3708, 3992, 2889, 1671, 660, 4441, 3236, 6482, 1413, 8080, 27, 898, 5161, 2030, 4997, 5739, 6589, 4433, 3519, 7750, 4929, 844, 5593, 2844, 7545, 784, 618, 4481, 6077, 5847, 7772, 2502, 4334, 434, 1790, 1290, 5455, 5143, 6783, 1298, 3189, 129, 7723, 511, 1876, 3282, 5605, 4243, 6809, 1151, 2467, 3120, 2631, 820, 5633, 1817, 4578, 2641, 1792, 50, 2020, 3194, 1377, 7426, 513, 4676, 6437, 6889, 6907, 5495, 1666, 2715, 3801, 6092, 5701, 5297, 7182, 2609, 4106, 6936, 4706, 2283, 7041, 7053, 4974, 8010, 4762, 18, 2481, 4335, 2262, 651, 4909, 1864, 219, 4927, 2509, 7661, 6099, 6918, 5512, 5700, 69, 1493, 3601, 7005, 1553, 7430, 4357, 8090, 139, 4461, 6937, 6334, 7860, 7654, 1071, 6549, 5492, 5964, 5473, 4199, 2793, 2086, 2902, 4110, 4136, 2037, 5514, 3052, 150, 7287, 8106, 402, 7768, 241, 1675, 2945, 7519, 7537, 3338, 7404, 3215, 7881, 3100, 4002, 7875, 2452, 6938, 2391, 7794, 975, 2398, 3637, 4828, 3363, 7368, 1537, 5778, 6828, 1558, 229, 7868, 512, 518, 7443, 8048, 6029, 3020, 4393, 4680, 3333, 1039, 2122, 3399, 7376, 4980, 1155, 4754, 2273, 3479, 3093, 7429, 7432, 1024, 682, 7851, 2862, 4823, 1444, 5831, 6832, 7885, 4364, 2076, 1901, 474, 7185, 2911, 4891, 5254, 1916, 3505, 8007, 4913, 24, 4164, 3990, 4961, 3281, 2994, 5540, 6306, 280, 2077, 6677, 7259, 1877, 6851, 3203, 609, 6241, 3390, 7847, 471, 3206, 3175, 5387, 3890, 3771, 409, 7284, 3091, 2890, 7674, 2592, 3735, 7903, 4016, 4617, 5352, 1360, 850, 4951, 6834, 4087, 7908, 5924, 7188, 
5905, 1826, 6140, 7802, 6574, 1448, 173, 6544, 1284, 4112, 3982, 6038, 5587, 90, 545, 4830, 5838, 748, 1915, 3357, 4816, 7552, 4126, 5580, 2840, 8153, 3455, 1035, 2220, 6774, 2118, 4252, 278, 43, 4377, 6191, 6596, 2551, 5014, 520, 6414, 7261, 5762, 6678, 2341, 3450, 1259, 6002, 4232, 1853, 2846, 4580, 4694, 2529, 2408, 7821, 4954, 5889, 6451, 2723, 5796, 6113, 5776, 2063, 1090, 3355, 2105, 3407, 1340, 7915, 1809, 2842, 3015, 3818, 1718, 3782, 2865, 6298, 8013, 1166, 1549, 3841, 4269, 1759, 6425, 6791, 7403, 2845, 5081, 2608, 3290, 6158, 165, 3372, 7510, 4937, 5377, 4351, 6897, 3538, 1614, 5968, 2773, 3378, 3163, 184, 93, 4084, 5168, 1296, 2928, 3435, 4088, 5240, 6315, 396, 2943, 6745, 2801, 1587, 4485, 112, 6468, 1802, 5218, 1190, 1521, 2812, 31, 2343, 1926, 2022]], device='cuda:0') <class 'torch.Tensor'> fix codes shape: torch.Size([1, 509]), codes type: torch.int64 code len: tensor([509], device='cuda:0') 100%|██████████████████████████████████████████████████████████████████████████████████| 25/25 [03:05<00:00, 7.43s/it] torch.Size([1, 224000]) wav shape: torch.Size([1, 224000]) min: tensor(-18834.8320, device='cuda:0') max: tensor(25274.6660, device='cuda:0')

gpt_gen_time: 98.79 seconds gpt_forward_time: 0.33 seconds s2mel_time: 206.56 seconds bigvgan_time: 15.19 seconds Total inference time: 323.56 seconds Generated audio length: 26.73 seconds RTF: 12.1053 wav file saved to: gen.wav

回答

9

字数多，会切分句子分别跑。

7

应该不是吧，字并不多，30 字左右吧。试跑了几次，这是唯一一个跑两遍的。