@comment{Record exported from the INPE URLib digital library.}

@inproceedings{FreitasFari:2018:UsTeMe,
  author              = {Freitas, Pedro Garcia and Farias, Myl{\`e}ne C. Q.},
  affiliation         = {{University of Bras{\'{\i}}lia} and {University of
                         Bras{\'{\i}}lia}},
  title               = {Using Texture Measures for Visual Quality Assessment},
  booktitle           = {Proceedings of the 31st Conference on Graphics,
                         Patterns and Images ({SIBGRAPI})},
  year                = {2018},
  month               = oct,
  editor              = {Ross, Arun and Gastal, Eduardo S. L. and Jorge, Joaquim A. and
                         Queiroz, Ricardo L. de and Minetto, Rodrigo and Sarkar, Sudeep and
                         Papa, Jo{\~a}o Paulo and Oliveira, Manuel M. and Arbel{\'a}ez,
                         Pablo and Mery, Domingo and Oliveira, Maria Cristina Ferreira de
                         and Spina, Thiago Vallin and Mendes, Caroline Mazetto and Costa,
                         Henrique S{\'e}rgio Gutierrez and Mejail, Marta Estela and Geus,
                         Klaus de and Scheer, Sergio},
  organization        = {Conference on Graphics, Patterns and Images, 31. ({SIBGRAPI})},
  publisher           = {Sociedade Brasileira de Computa{\c{c}}{\~a}o},
  address             = {Porto Alegre},
  keywords            = {Visual quality, objective metrics, no-reference image quality
                         assessment, video quality assessment.},
  abstract            = {The automatic quality assessment of images and videos is a crucial
                         problem for a wide range of applications in the fields of computer
                         vision and multimedia processing. For instance, many computer
                         vision applications, such as biometric identification, content
                         retrieval, and object recognition, rely on input images with a
                         specific range of quality. Therefore, a great research effort has
                         been made to develop a visual quality assessment (VQA) methods
                         that are able to automatically estimate quality. However, VQA
                         still faces several challenges. In the case of images, most of the
                         proposed methods are complex and require a reference (pristine
                         image) to estimate the quality, which limits their use in several
                         multimedia applications. For videos, the current state-of-the-art
                         methods still perform worse than the methods designed for images,
                         both in terms of prediction accuracy and computational complexity.
                         In this work, we proposed a set of methods to estimate visual
                         quality using texture descriptors and machine learning. Starting
                         from the premise that visual impairments alter image and video
                         texture statistics, we propose a framework that use these
                         descriptors to produce new quality assessment methods, including
                         no-reference (blind) and full-reference quality metrics.
                         Experimental results indicate that the proposed metrics present a
                         good performance when tested on several benchmark image and video
                         quality databases, outperforming current state-of-the-art
                         metrics.},
  conference-location = {Foz do Igua{\c{c}}u, PR, Brazil},
  conference-year     = {Oct. 29 - Nov. 1, 2018},
  language            = {en},
  ibi                 = {8JMKD3MGPAW/3S3EAQ2},
  url                 = {http://urlib.net/rep/8JMKD3MGPAW/3S3EAQ2},
  targetfile          = {wtd-manuscript-CR.pdf},
  urlaccessdate       = {2020, Dec. 02},
}

