Type: Dataset
Tags: Tracking, Web Crawling, Online Privacy, Browser Fingerprinting, World Wide Web
Bibtex:
@inproceedings{10.1145/3366423.3380104,
  author    = {Zeber, David and Bird, Sarah and Oliveira, Camila and Rudametkin, Walter and Segall, Ilana and Wolls\'{e}n, Fredrik and Lopatka, Martin},
  title     = {The Representativeness of Automated Web Crawls as a Surrogate for Human Browsing},
  year      = {2020},
  isbn      = {9781450370233},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  url       = {https://doi.org/10.1145/3366423.3380104},
  doi       = {10.1145/3366423.3380104},
  booktitle = {Proceedings of The Web Conference 2020},
  pages     = {167--178},
  numpages  = {12},
  keywords  = {Web Crawling, Online Privacy, Tracking, Browser Fingerprinting, World Wide Web},
  location  = {Taipei, Taiwan},
  series    = {WWW '20},
  abstract  = {Large-scale Web crawls have emerged as the state of the art for studying characteristics of the Web. In particular, they are a core tool for online tracking research. Web crawling is an attractive approach to data collection, as crawls can be run at relatively low infrastructure cost and don't require handling sensitive user data such as browsing histories. However, the biases introduced by using crawls as a proxy for human browsing data have not been well studied. Crawls may fail to capture the diversity of user environments, and the snapshot view of the Web presented by one-time crawls does not reflect its constantly evolving nature, which hinders reproducibility of crawl-based studies. In this paper, we quantify the repeatability and representativeness of Web crawls in terms of common tracking and fingerprinting metrics, considering both variation across crawls and divergence from human browser usage. We quantify baseline variation of simultaneous crawls, then isolate the effects of time, cloud IP address vs. residential, and operating system. This provides a foundation to assess the agreement between crawls visiting a standard list of high-traffic websites and actual browsing behaviour measured from an opt-in sample of over 50,000 users of the Firefox Web browser. Our analysis reveals differences between the treatment of stateless crawling infrastructure and generally stateful human browsing, showing, for example, that crawlers tend to experience higher rates of third-party activity than human browser users on loading pages from the same domains.},
  terms     = {},
  license   = {Mozilla Public License 2.0},
  superseded= {}
}