{"created":"2021-03-01T06:20:43.364403+00:00","id":4067,"links":{},"metadata":{"_buckets":{"deposit":"a604df45-2211-4ef1-aff8-c0c9ebba0b9f"},"_deposit":{"id":"4067","owners":[],"pid":{"revision_id":0,"type":"depid","value":"4067"},"status":"published"},"_oai":{"id":"oai:repository.dl.itc.u-tokyo.ac.jp:00004067"},"item_7_alternative_title_1":{"attribute_name":"\u305d\u306e\u4ed6\u306e\u30bf\u30a4\u30c8\u30eb","attribute_value_mlt":[{"subitem_alternative_title":"Eigenvoice\u306b\u57fa\u3065\u304f\u30ad\u30e3\u30e9\u30af\u30bf\u30fc\u5909\u63db\u3068\u305d\u306e\u8a55\u4fa1"}]},"item_7_biblio_info_7":{"attribute_name":"\u66f8\u8a8c\u60c5\u5831","attribute_value_mlt":[{"bibliographicIssueDates":{"bibliographicIssueDate":"2012-09-27","bibliographicIssueDateType":"Issued"},"bibliographic_titles":[{}]}]},"item_7_date_granted_25":{"attribute_name":"\u5b66\u4f4d\u6388\u4e0e\u5e74\u6708\u65e5","attribute_value_mlt":[{"subitem_dategranted":"2012-09-27"}]},"item_7_degree_name_20":{"attribute_name":"\u5b66\u4f4d\u540d","attribute_value_mlt":[{"subitem_degreename":"\u4fee\u58eb(\u60c5\u5831\u7406\u5de5\u5b66)"}]},"item_7_description_5":{"attribute_name":"\u6284\u9332","attribute_value_mlt":[{"subitem_description":"This thesis describes a new method of voice conversion, which aims at character conversion based on eigenvoice GMM (EV-GMM) approach. Using an eigenvoice space built from 273 speakers and speech samples of three different characters created by a single skilled voice actor/actress, the conversion can generate the voices of the three characters from an arbitrary speaker, while keeping the speaker identity. Listening tests were carried out by presenting two kinds of synthetic voices; before and after the character conversion. The results showed that listeners, both native and non-native speakers, can perceive well the character voice difference as what was intended by experimenters. \nIt was also shown that this difference was perceived well even when F0 difference between the two was very small, which indicates better performance of our method in character conversion compared to the general F0-based conversion. Further, acoustic comparison between different characters in two cases of the voice actor and the proposed method was made. Results showed that the proposed method can realize acoustically valid modification between different characters.","subitem_description_type":"Abstract"}]},"item_7_full_name_3":{"attribute_name":"\u8457\u8005\u5225\u540d","attribute_value_mlt":[{"nameIdentifiers":[{"nameIdentifier":"9378","nameIdentifierScheme":"WEKO"}],"names":[{"name":"\u30dd\u30f3\u30ad\u30c3\u30c6\u30a3\u30d1\u30f3, \u30c6\u30a3\u30fc\u30e9\u30dd\u30f3"}]}]},"item_7_select_21":{"attribute_name":"\u5b66\u4f4d","attribute_value_mlt":[{"subitem_select_item":"master"}]},"item_7_subject_13":{"attribute_name":"\u65e5\u672c\u5341\u9032\u5206\u985e\u6cd5","attribute_value_mlt":[{"subitem_subject":"007","subitem_subject_scheme":"NDC"}]},"item_7_text_24":{"attribute_name":"\u7814\u7a76\u79d1\u30fb\u5c02\u653b","attribute_value_mlt":[{"subitem_text_value":"\u60c5\u5831\u7406\u5de5\u5b66\u7cfb\u7814\u7a76\u79d1\u96fb\u5b50\u60c5\u5831\u5b66\u5c02\u653b"}]},"item_7_text_4":{"attribute_name":"\u8457\u8005\u6240\u5c5e","attribute_value_mlt":[{"subitem_text_value":"\u6771\u4eac\u5927\u5b66\u5927\u5b66\u9662\u60c5\u5831\u7406\u5de5\u5b66\u7cfb\u7814\u7a76\u79d1\u96fb\u5b50\u60c5\u5831\u5b66\u5c02\u653b"},{"subitem_text_value":"Department of Information and Communication Engineering, Graduate School of Information Science and Technology, The University of Tokyo"}]},"item_creator":{"attribute_name":"\u8457\u8005","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Pongkittiphan, Teeraphon"}],"nameIdentifiers":[{"nameIdentifier":"9377","nameIdentifierScheme":"WEKO"}]}]},"item_files":{"attribute_name":"\u30d5\u30a1\u30a4\u30eb\u60c5\u5831","attribute_type":"file","attribute_value_mlt":[{"accessrole":"open_date","date":[{"dateType":"Available","dateValue":"2017-06-01"}],"displaytype":"detail","filename":"48106452.pdf","filesize":[{"value":"2.0 MB"}],"format":"application/pdf","licensetype":"license_note","mimetype":"application/pdf","url":{"label":"48106452.pdf","url":"https://repository.dl.itc.u-tokyo.ac.jp/record/4067/files/48106452.pdf"},"version_id":"62f2d806-6efb-4ab9-8ebb-1e8db4a948ab"}]},"item_language":{"attribute_name":"\u8a00\u8a9e","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_resource_type":{"attribute_name":"\u8cc7\u6e90\u30bf\u30a4\u30d7","attribute_value_mlt":[{"resourcetype":"thesis","resourceuri":"http://purl.org/coar/resource_type/c_46ec"}]},"item_title":"Eigenvoice-based character conversion and its evaluations","item_titles":{"attribute_name":"\u30bf\u30a4\u30c8\u30eb","attribute_value_mlt":[{"subitem_title":"Eigenvoice-based character conversion and its evaluations"}]},"item_type_id":"7","owner":"1","path":["9/233/234","34/105/262"],"pubdate":{"attribute_name":"\u516c\u958b\u65e5","attribute_value":"2012-10-30"},"publish_date":"2012-10-30","publish_status":"0","recid":"4067","relation_version_is_last":true,"title":["Eigenvoice-based character conversion and its evaluations"],"weko_creator_id":"1","weko_shared_id":null},"updated":"2021-03-02T07:50:40.571497+00:00"}